//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users())
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT->dominates(Ret, BIP));

  rememberInstruction(Ret);
  return Ret;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
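  // With both "signed" flags false and the source and destination types
  // constrained to the same bit width by the assert below, getCastOpcode can
  // only return a noop cast opcode: BitCast, PtrToInt, or IntToPtr.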
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby.  If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  BuilderType::InsertPointGuard Guard(Builder);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  rememberInstruction(BO);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const DataLayout *DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                                                   FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                                                  FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (DL) {
      // With DataLayout, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                                                   FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without DataLayout, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, DL) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
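  // Count the trailing operands that are addrecs; by convention they are
  // kept at the end of the list.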
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
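    // Repeatedly strip addrecs from this operand; the start value may
    // itself be (or contain) another addrec.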
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = SE.DL
                 ? SE.DL->getIntPtrType(PTy)
                 : Type::getInt64Ty(PTy->getContext());

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.DL) {
        // With DataLayout, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.DL->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without DataLayout, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby.  If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BuilderType::InsertPointGuard Guard(Builder);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPoint SaveInsertPt = Builder.saveIP();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  Builder.restoreIP(SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, nullptr));
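  // DenseMap::insert returns the pre-existing entry when the key is already
  // present, so a false 'second' means the answer is cached.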
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown that is not an instruction, peek
        // through it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
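  // If the divisor is a constant power of two, lower the udiv to a logical
  // shift right.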
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
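  // Peel off any addrecs: keep only the start value in Base and fold the
  // corresponding zero-start recurrence into Rest.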
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
           OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  // If we have arrived back at the PHI itself, the chain is well-behaved.
  if (IncV == PN)
    return true;

  // Otherwise, continue walking back through the chain.
  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return nullptr.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT->dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT->dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT->dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos)
      || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT->dominates(IncV, InsertPos))
      break;
  }
  for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
         E = IVIncs.rend(); I != E; ++I) {
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
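  // Walk the chain of IV operands back from IncV; if it leads to PN, the
  // phi is in one of the expanded forms.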
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
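    // Expand the step as a GEP offset applied to the current PHI value.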
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// \brief Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
static void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                           Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}

/// \brief Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

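  // The transformations below only truncate, so the requested type must not
  // be wider than the phi's type.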
  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncating it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV = IVIncInsertLoop &&
      SE.DT->properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

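    // Scan the header phis for an addrec that matches Normalized, either
    // exactly or, when allowed, up to truncation and/or step inversion.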
    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()))
        continue;

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      Instruction *TempIncV =
          cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
        AddRecPhiMatch = PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPointGuard Guard(Builder);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header).  Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
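    // Transfer the addrec's no-wrap flags onto the increment instruction,
    // when it is an overflowing binary operator.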
    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (Normalized->getNoWrapFlags(SCEV::FlagNUW))
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (Normalized->getNoWrapFlags(SCEV::FlagNSW))
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, nullptr,
                                                  nullptr, Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(
                             Start, Step, Normalized->getLoop(),
                             Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
                                          TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result)
        && !SE.DT->dominates(cast<Instruction>(Result),
                             Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside whose operand is replaced during
1306      // expansion with the value of the postinc user. Without fundamentally
1307      // changing the way postinc users are tracked, the only remedy is
1308      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
1309      // but hopefully expandCodeFor handles that.
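      // When the step is a non-constant negative expression such as
      // (-1 * %n), expand the negated step %n and emit a subtract instead
      // of materializing the negation.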
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        BuilderType::InsertPointGuard Guard(Builder);
        StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType()) {
      Result = Builder.CreateTrunc(Result, TruncTy);
      rememberInstruction(Result);
    }
    // Invert the result.
    if (InvertStep) {
      Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
                                 Result);
      rememberInstruction(Result);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = nullptr;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       S->getNoWrapFlags(SCEV::FlagNW)));
    BasicBlock::iterator NewInsertPt =
      std::next(BasicBlock::iterator(cast<Instruction>(V)));
    BuilderType::InsertPointGuard Guard(Builder);
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
                      NewInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
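  // E.g. if X is (or wraps) a loop-invariant pointer, ExposePointerBase below
  // digs out the pointer base and the {0,+,F} part becomes a GEP index;
  // otherwise the two halves are simply re-added.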
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
                                        S->getNoWrapFlags(SCEV::FlagNW));

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
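    // With a single loop predecessor and latch, the result is roughly:
    //   header: %indvar = phi iN [ 0, %preheader ], [ %indvar.next, %latch ]
    //   latch:  %indvar.next = add iN %indvar, 1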
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    SmallSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP))
        continue;

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form.  This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
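  // For example, evaluateAtIteration turns {0,+,1,+,1} at iteration i of the
  // canonical IV into i + i*(i-1)/2.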
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
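  // Expand an N-ary smax as a right-to-left chain of icmp/select pairs;
  // e.g. smax(a, b, c) becomes t = (c >s b ? c : b), then (t >s a ? t : a).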
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
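  // Same expansion scheme as visitSMaxExpr above, but using unsigned
  // comparisons.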
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

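// Typical client usage is a sketch like the following ("Exp" and "InsertPt"
// are placeholder names, not code from this file):
//   SCEVExpander Exp(SE, "scev");
//   Value *V = Exp.expandCodeFor(S, S->getType(), InsertPt);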
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (InsertPt != Builder.GetInsertPoint()
             && (isInsertedInstruction(InsertPt)
                 || isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = std::next(BasicBlock::iterator(InsertPt));
      }
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >::iterator
    I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BuilderType::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non-postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none).  A canonical induction variable
/// starts at zero and steps by one on each iteration.
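///
/// For example, requesting an i64 canonical IV builds the SCEV {0,+,1}<L>
/// and expands it at the top of L's header, reusing an existing suitable phi
/// when the expander can find one.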
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BuilderType::InsertPointGuard Guard(Builder);
  PHINode *V = cast<PHINode>(expandCodeFor(H, nullptr,
                                           L->getHeader()->begin()));

  return V;
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetTransformInfo *TTI) {
  // Find the loop-header phis; when TTI is available, sort them in order of
  // decreasing width.
  SmallVector<PHINode*, 8> Phis;
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
    Phis.push_back(Phi);
  }
  if (TTI)
    std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
      // Put pointers at the back and make sure pointer < pointer = false.
      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
        return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
      return RHS->getType()->getPrimitiveSizeInBits() <
             LHS->getType()->getPrimitiveSizeInBits();
    });

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow, mapping wide phis to their truncations
  // so narrow phis can reuse them.
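  // E.g. once a wide i64 phi has been recorded, an i32 phi computing the
  // truncation of the same expression can be replaced with a trunc of the
  // wide phi (provided TTI reports the truncate as free).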
  for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
         PEnd = Phis.end(); PIter != PEnd; ++PIter) {
    PHINode *Phi = *PIter;

    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = SimplifyInstruction(Phi, SE.DL, SE.TLI, SE.DT)) {
      Phi->replaceAllUsesWith(V);
      DeadInsts.push_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI
          && TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi has the same width but is more canonical, replace the
      // original with it. As part of the "more canonical" determination,
      // respect a prior decision to use an IV chain.
      if (OrigPhiRef->getType() == Phi->getType()
          && !(ChainedPhis.count(Phi)
               || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
          && (ChainedPhis.count(Phi)
              || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. It's worth eagerly cleaning up the
      // common case of a single IV increment so that DeleteDeadPHIs can remove
      // cycles that had postinc uses.
      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
                                                   IsomorphicInc->getType());
      if (OrigInc != IsomorphicInc
          && TruncExpr == SE.getSCEV(IsomorphicInc)
          && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
              || hoistIVInc(OrigInc, IsomorphicInc))) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        Value *NewInc = OrigInc;
        if (OrigInc->getType() != IsomorphicInc->getType()) {
          Instruction *IP = isa<PHINode>(OrigInc)
            ? (Instruction*)L->getHeader()->getFirstInsertionPt()
            : OrigInc->getNextNode();
          IRBuilder<> Builder(IP);
          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.
            CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
        }
        IsomorphicInc->replaceAllUsesWith(NewInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}

namespace {
// Search for a SCEV subexpression that is not safe to expand.  Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check is
// only needed when the expression includes some subexpression that is not IV
// derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
//
// We cannot generally expand recurrences unless the step dominates the loop
// header. The expander handles the special case of affine recurrences by
// scaling the recurrence outside the loop, but this technique isn't generally
// applicable. Expanding a nested recurrence outside a loop requires computing
// binomial coefficients. This could be done, but the recurrence has to be in a
// perfectly reduced form, which can't be guaranteed.
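//
// For example, (%a /u %b) with a non-constant %b is rejected here because %b
// may be zero, and a non-affine recurrence whose step is unavailable above
// the loop cannot be expanded at all.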
struct SCEVFindUnsafe {
  ScalarEvolution &SE;
  bool IsUnsafe;

  SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}

  bool follow(const SCEV *S) {
    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
      if (!SC || SC->getValue()->isZero()) {
        IsUnsafe = true;
        return false;
      }
    }
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
      const SCEV *Step = AR->getStepRecurrence(SE);
      if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
        IsUnsafe = true;
        return false;
      }
    }
    return true;
  }
  bool isDone() const { return IsUnsafe; }
};
}

namespace llvm {
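// Callers typically guard expansion with this predicate; a sketch (the names
// below are placeholders, not code from this file):
//   if (isSafeToExpand(S, SE))
//     V = Expander.expandCodeFor(S, Ty, InsertPt);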
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
  SCEVFindUnsafe Search(SE);
  visitAll(S, Search);
  return !Search.IsUnsafe;
}
}