// ScalarEvolutionExpander.cpp revision 667d787c0a21cf3f5dfcde03ca471162ba35b614
//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;

/// InsertCastOfTo - Insert a cast of V to the specified type, doing what
/// we can to share the casts.
Value *SCEVExpander::InsertCastOfTo(Instruction::CastOps opcode, Value *V,
                                    const Type *Ty) {
  // Short-circuit unnecessary bitcasts.
  if (opcode == Instruction::BitCast && V->getType() == Ty)
    return V;

  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((opcode == Instruction::PtrToInt || opcode == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // FIXME: keep track of the cast instruction.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(opcode, C, Ty);

  if (Argument *A = dyn_cast<Argument>(V)) {
    // Check to see if there is already a cast!
    for (Value::use_iterator UI = A->use_begin(), E = A->use_end();
         UI != E; ++UI)
      if ((*UI)->getType() == Ty)
        if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
          if (CI->getOpcode() == opcode) {
            // If the cast isn't the first instruction of the function, move it.
            if (BasicBlock::iterator(CI) !=
                A->getParent()->getEntryBlock().begin()) {
              // Recreate the cast at the beginning of the entry block.
              // The old cast is left in place in case it is being used
              // as an insert point.
              Instruction *NewCI =
                CastInst::Create(opcode, V, Ty, "",
                                 A->getParent()->getEntryBlock().begin());
              NewCI->takeName(CI);
              CI->replaceAllUsesWith(NewCI);
              return NewCI;
            }
            return CI;
          }

    Instruction *I = CastInst::Create(opcode, V, Ty, V->getName(),
                                      A->getParent()->getEntryBlock().begin());
    InsertedValues.insert(I);
    return I;
  }

  Instruction *I = cast<Instruction>(V);

  // Check to see if there is already a cast. If there is, use it.
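  // Note that whether the cast is reused or created here, it must be placed
  // immediately after I's definition: past the invoke's normal destination
  // entry when I is an invoke, and past any PHI nodes at the front of the
  // block, exactly as the fallback code below does.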
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    if ((*UI)->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
        if (CI->getOpcode() == opcode) {
          BasicBlock::iterator It = I; ++It;
          if (isa<InvokeInst>(I))
            It = cast<InvokeInst>(I)->getNormalDest()->begin();
          while (isa<PHINode>(It)) ++It;
          if (It != BasicBlock::iterator(CI)) {
            // Recreate the cast at the proper insertion point.
            // The old cast is left in place in case it is being used
            // as an insert point.
            Instruction *NewCI = CastInst::Create(opcode, V, Ty, "", It);
            NewCI->takeName(CI);
            CI->replaceAllUsesWith(NewCI);
            return NewCI;
          }
          return CI;
        }
  }
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP)) ++IP;
  Instruction *CI = CastInst::Create(opcode, V, Ty, V->getName(), IP);
  InsertedValues.insert(CI);
  return CI;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, const Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");
  return InsertCastOfTo(Op, V, Ty);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, BasicBlock::iterator InsertPt) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = InsertPt->getParent()->begin();
  if (InsertPt != BlockBegin) {
    // Scanning starts from the last instruction before InsertPt.
    BasicBlock::iterator IP = InsertPt;
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = BinaryOperator::Create(Opcode, LHS, RHS, "tmp", InsertPt);
  InsertedValues.insert(BO);
  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV* &S,
                              const SCEV* &Remainder,
                              const APInt &Factor,
                              ScalarEvolution &SE) {
  // Everything is divisible by one.
  if (Factor == 1)
    return true;

  // For a Constant, check for a multiple of the given factor.
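  // For example (illustrative values): with S = 10 and Factor = 4, S becomes
  // the quotient 2 and the remainder 2 is added to Remainder; with S = 1 and
  // Factor = 4, the quotient would be 0, so the constant is rejected at this
  // scale and left for a smaller one.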
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    ConstantInt *CI =
      ConstantInt::get(C->getValue()->getValue().sdiv(Factor));
    // If the quotient is zero and the remainder is non-zero, reject
    // the value at this scale. It will be considered for subsequent
    // smaller scales.
    if (C->isZero() || !CI->isZero()) {
      const SCEV* Div = SE.getConstant(CI);
      S = Div;
      Remainder =
        SE.getAddExpr(Remainder,
                      SE.getConstant(C->getValue()->getValue().srem(Factor)));
      return true;
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S))
    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
      if (!C->getValue()->getValue().srem(Factor)) {
        const SmallVectorImpl<const SCEV*> &MOperands = M->getOperands();
        SmallVector<const SCEV*, 4> NewMulOps(MOperands.begin(),
                                              MOperands.end());
        NewMulOps[0] =
          SE.getConstant(C->getValue()->getValue().sdiv(Factor));
        S = SE.getMulExpr(NewMulOps);
        return true;
      }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV* Step = A->getStepRecurrence(SE);
    const SCEV* StepRem = SE.getIntegerSCEV(0, Step->getType());
    if (!FactorOutConstant(Step, StepRem, Factor, SE))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV* Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop());
    return true;
  }

  return false;
}

/// expandAddToGEP - Expand a SCEVAddExpr with a pointer type into a GEP
/// instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis analyze the result. However, it suffers from the
/// underlying bug described in PR2831. In LLVM, addition currently always
/// has guaranteed two's complement wrapping, while the semantics of
/// getelementptr overflow are ambiguous. In the common case, though, this
/// expansion gets used when a GEP in the original code has been converted
/// into integer arithmetic, in which case the resulting code will be no
/// more undefined than it was originally.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
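///
/// For example (an illustrative case, not taken from the original comments):
/// expanding %p + {0,+,8}<L> where %p has type double* can factor the 8 out
/// as the element size, producing a GEP of %p indexed by {0,+,1}<L> instead
/// of ptrtoint/add/inttoptr arithmetic.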
///
Value *SCEVExpander::expandAddToGEP(const SCEV* const *op_begin,
                                    const SCEV* const *op_end,
                                    const PointerType *PTy,
                                    const Type *Ty,
                                    Value *V) {
  const Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV*, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    APInt ElSize = APInt(SE.getTypeSizeInBits(Ty),
                         ElTy->isSized() ? SE.TD->getTypeAllocSize(ElTy) : 0);
    SmallVector<const SCEV*, 8> NewOps;
    SmallVector<const SCEV*, 8> ScaledOps;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      // Split AddRecs up into parts as either of the parts may be usable
      // without the other.
      if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i]))
        if (!A->getStart()->isZero()) {
          const SCEV* Start = A->getStart();
          Ops.push_back(SE.getAddRecExpr(SE.getIntegerSCEV(0, A->getType()),
                                         A->getStepRecurrence(SE),
                                         A->getLoop()));
          Ops[i] = Start;
          ++e;
        }
      // If the scale size is not 0, attempt to factor out a scale.
      if (ElSize != 0) {
        const SCEV* Op = Ops[i];
        const SCEV* Remainder = SE.getIntegerSCEV(0, Op->getType());
        if (FactorOutConstant(Op, Remainder, ElSize, SE)) {
          ScaledOps.push_back(Op); // Op now has ElSize factored out.
          NewOps.push_back(Remainder);
          continue;
        }
      }
      // If the operand was not divisible, add it to the list of operands
      // we'll scan next iteration.
      NewOps.push_back(Ops[i]);
    }
    Ops = NewOps;
    AnyNonZeroIndices |= !ScaledOps.empty();
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    if (!Ops.empty())
      while (const StructType *STy = dyn_cast<StructType>(ElTy)) {
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(ConstantInt::get(Type::Int32Ty, ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              continue;
            }
          }
        break;
      }

    if (const ArrayType *ATy = dyn_cast<ArrayType>(ElTy)) {
      ElTy = ATy->getElementType();
      continue;
    }
    break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    V = InsertNoopCastOfTo(V,
                           Type::Int8Ty->getPointerTo(PTy->getAddressSpace()));
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
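    // (This mirrors the constant folding and nearby-reuse scan done for
    // ordinary binary operators in InsertBinop above.)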
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, &CRHS, 1);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = InsertPt->getParent()->begin();
    if (InsertPt != BlockBegin) {
      // Scanning starts from the last instruction before InsertPt.
      BasicBlock::iterator IP = InsertPt;
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    Value *GEP = GetElementPtrInst::Create(V, Idx, "scevgep", InsertPt);
    InsertedValues.insert(GEP);
    return GEP;
  }

  // Insert a pretty getelementptr.
  Value *GEP = GetElementPtrInst::Create(V,
                                         GepIndices.begin(),
                                         GepIndices.end(),
                                         "scevgep", InsertPt);
  Ops.push_back(SE.getUnknown(GEP));
  InsertedValues.insert(GEP);
  return expand(SE.getAddExpr(Ops));
}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expand(S->getOperand(S->getNumOperands()-1));

  // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
  // comments on expandAddToGEP for details.
  if (SE.TD)
    if (const PointerType *PTy = dyn_cast<PointerType>(V->getType())) {
      const SmallVectorImpl<const SCEV*> &Ops = S->getOperands();
      return expandAddToGEP(&Ops[0], &Ops[Ops.size() - 1],
                            PTy, Ty, V);
    }

  V = InsertNoopCastOfTo(V, Ty);

  // Emit a bunch of add instructions.
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    Value *W = expandCodeFor(S->getOperand(i), Ty);
    V = InsertBinop(Instruction::Add, V, W, InsertPt);
  }
  return V;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  int FirstOp = 0; // Set if we should emit a subtract.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getOperand(0)))
    if (SC->getValue()->isAllOnesValue())
      FirstOp = 1;

  int i = S->getNumOperands()-2;
  Value *V = expandCodeFor(S->getOperand(i+1), Ty);

  // Emit a bunch of multiply instructions.
  for (; i >= FirstOp; --i) {
    Value *W = expandCodeFor(S->getOperand(i), Ty);
    V = InsertBinop(Instruction::Mul, V, W, InsertPt);
  }

  // -1 * ...  --->  0 - ...
  if (FirstOp == 1)
    V = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), V, InsertPt);
  return V;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()),
                         InsertPt);
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS, InsertPt);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
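/// For example (illustrative): given Base = {(4 + %p),+,8}<L>, the loop
/// below peels the recurrence, leaving Base = 4 + %p and folding {0,+,8}<L>
/// into Rest; the add is then split so that Base = %p and the constant 4
/// also moves into Rest.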
static void ExposePointerBase(const SCEV* &Base, const SCEV* &Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getIntegerSCEV(0, A->getType()),
                                          A->getStepRecurrence(SE),
                                          A->getLoop()));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV*, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.isSCEVable(PN->getType()) &&
        isa<IntegerType>(SE.getEffectiveSCEVType(PN->getType())) &&
        SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    const SCEV* Start = SE.getAnyExtendExpr(S->getStart(),
                                            CanonicalIV->getType());
    const SCEV* Step = SE.getAnyExtendExpr(S->getStepRecurrence(SE),
                                           CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(Start, Step, S->getLoop()));
    BasicBlock::iterator SaveInsertPt = InsertPt;
    BasicBlock::iterator NewInsertPt =
      next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt)) ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    InsertPt = SaveInsertPt;
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    const SmallVectorImpl<const SCEV*> &SOperands = S->getOperands();
    SmallVector<const SCEV*, 4> NewOps(SOperands.begin(), SOperands.end());
    NewOps[0] = SE.getIntegerSCEV(0, Ty);
    const SCEV* Rest = SE.getAddRecExpr(NewOps, L);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    if (SE.TD) {
      const SCEV* Base = S->getStart();
      const SCEV* RestArray[1] = { Rest };
      // Dig into the expression to find the pointer base for a GEP.
      ExposePointerBase(Base, RestArray[0], SE);
      // If we found a pointer, expand the AddRec with a GEP.
      if (const PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
        // Make sure the Base isn't something exotic, such as a multiplied
        // or divided pointer value. In those cases, the result type isn't
        // actually a pointer type.
        if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
          Value *StartV = expand(Base);
          assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
          return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
        }
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() &&
      S->getOperand(1) == SE.getIntegerSCEV(1, Ty)) {
    // If there's a canonical IV, just use it.
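    // (A wider canonical IV was already handled by the truncation path
    // above, so at this point its type must match Ty, as the assert checks.)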
    if (CanonicalIV) {
      assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
             "IVs with types different from the canonical IV should "
             "already have been handled!");
      return CanonicalIV;
    }

    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    PHINode *PN = PHINode::Create(Ty, "indvar", Header->begin());
    InsertedValues.insert(PN);
    PN->addIncoming(Constant::getNullValue(Ty), L->getLoopPreheader());

    pred_iterator HPI = pred_begin(Header);
    assert(HPI != pred_end(Header) && "Loop with zero preds???");
    if (!L->contains(*HPI)) ++HPI;
    assert(HPI != pred_end(Header) && L->contains(*HPI) &&
           "No backedge in loop?");

    // Insert a unit add instruction right before the terminator
    // corresponding to the back-edge.
    Constant *One = ConstantInt::get(Ty, 1);
    Instruction *Add = BinaryOperator::CreateAdd(PN, One, "indvar.next",
                                                 (*HPI)->getTerminator());
    InsertedValues.insert(Add);

    pred_iterator PI = pred_begin(Header);
    if (*PI == L->getLoopPreheader())
      ++PI;
    PN->addIncoming(Add, *PI);
    return PN;
  }

  // {0,+,F} --> {0,+,1} * F
  // Get the canonical induction variable I for this loop.
  Value *I = CanonicalIV ?
             CanonicalIV :
             getOrInsertCanonicalInductionVariable(L, Ty);

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())   // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(I),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            I->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV* IH = SE.getUnknown(I);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV* NewS = S;
  const SCEV* Ext = SE.getNoopOrAnyExtend(S, I->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV* V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
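  // (The evaluation may have been carried out in the wider canonical IV
  // type.)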
  const SCEV* T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Instruction *I = new TruncInst(V, Ty, "tmp.", InsertPt);
  InsertedValues.insert(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Instruction *I = new ZExtInst(V, Ty, "tmp.", InsertPt);
  InsertedValues.insert(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Instruction *I = new SExtInst(V, Ty, "tmp.", InsertPt);
  InsertedValues.insert(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *LHS = expandCodeFor(S->getOperand(0), Ty);
  for (unsigned i = 1; i < S->getNumOperands(); ++i) {
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Instruction *ICmp =
      new ICmpInst(ICmpInst::ICMP_SGT, LHS, RHS, "tmp", InsertPt);
    InsertedValues.insert(ICmp);
    Instruction *Sel = SelectInst::Create(ICmp, LHS, RHS, "smax", InsertPt);
    InsertedValues.insert(Sel);
    LHS = Sel;
  }
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *LHS = expandCodeFor(S->getOperand(0), Ty);
  for (unsigned i = 1; i < S->getNumOperands(); ++i) {
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Instruction *ICmp =
      new ICmpInst(ICmpInst::ICMP_UGT, LHS, RHS, "tmp", InsertPt);
    InsertedValues.insert(ICmp);
    Instruction *Sel = SelectInst::Create(ICmp, LHS, RHS, "umax", InsertPt);
    InsertedValues.insert(Sel);
    LHS = Sel;
  }
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV* SH, const Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
  BasicBlock::iterator SaveInsertPt = InsertPt;

  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  for (Loop *L = SE.LI->getLoopFor(InsertPt->getParent()); ;
       L = L->getParentLoop())
    if (S->isLoopInvariant(L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
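      // Also skip past any instructions this expander previously inserted at
      // the chosen point, so newly expanded code is emitted after them.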
      if (L && S->hasComputableLoopEvolution(L))
        InsertPt = L->getHeader()->getFirstNonPHI();
      while (isInsertedInstruction(InsertPt)) ++InsertPt;
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end()) {
    InsertPt = SaveInsertPt;
    return I->second;
  }

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  InsertPt = SaveInsertPt;
  return V;
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
Value *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    const Type *Ty) {
  assert(Ty->isInteger() && "Can only insert integer induction variables!");
  const SCEV* H = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty),
                                   SE.getIntegerSCEV(1, Ty), L);
  BasicBlock::iterator SaveInsertPt = InsertPt;
  Value *V = expandCodeFor(H, 0, L->getHeader()->begin());
  InsertPt = SaveInsertPt;
  return V;
}
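
// A minimal usage sketch (the client context, the variable names, and the
// exact constructor signature are assumptions for illustration, not part of
// this file):
//
//   SCEVExpander Rewriter(SE);            // assumed constructor
//   const SCEV *S = SE.getSCEV(OldV);     // SCEV for the value to rewrite
//   Value *NewV = Rewriter.expandCodeFor(S, OldV->getType(), InsertPt);
//   OldV->replaceAllUsesWith(NewV);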