ScalarEvolutionExpander.cpp revision 1d826a76f591afea445489b9a5485c345e66bf87
//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, const Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast && V->getType() == Ty)
    return V;

  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  if (Argument *A = dyn_cast<Argument>(V)) {
    // Check to see if there is already a cast!
    for (Value::use_iterator UI = A->use_begin(), E = A->use_end();
         UI != E; ++UI)
      if ((*UI)->getType() == Ty)
        if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
          if (CI->getOpcode() == Op) {
            // If the cast isn't the first instruction of the function, move it.
            if (BasicBlock::iterator(CI) !=
                A->getParent()->getEntryBlock().begin()) {
              // Recreate the cast at the beginning of the entry block.
              // The old cast is left in place in case it is being used
              // as an insert point.
              Instruction *NewCI =
                CastInst::Create(Op, V, Ty, "",
                                 A->getParent()->getEntryBlock().begin());
              NewCI->takeName(CI);
              CI->replaceAllUsesWith(NewCI);
              return NewCI;
            }
            return CI;
          }

    Instruction *I = CastInst::Create(Op, V, Ty, V->getName(),
                                      A->getParent()->getEntryBlock().begin());
    rememberInstruction(I);
    return I;
  }

  Instruction *I = cast<Instruction>(V);

  // Check to see if there is already a cast. If there is, use it.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    if ((*UI)->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
        if (CI->getOpcode() == Op) {
          BasicBlock::iterator It = I; ++It;
          if (isa<InvokeInst>(I))
            It = cast<InvokeInst>(I)->getNormalDest()->begin();
          while (isa<PHINode>(It)) ++It;
          if (It != BasicBlock::iterator(CI)) {
            // Recreate the cast after the user.
            // The old cast is left in place in case it is being used
            // as an insert point.
            Instruction *NewCI = CastInst::Create(Op, V, Ty, "", It);
            NewCI->takeName(CI);
            CI->replaceAllUsesWith(NewCI);
            rememberInstruction(NewCI);
            return NewCI;
          }
          rememberInstruction(CI);
          return CI;
        }
  }
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP)) ++IP;
  Instruction *CI = CastInst::Create(Op, V, Ty, V->getName(), IP);
  rememberInstruction(CI);
  return CI;
}
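
// Example (illustrative; %p and the 64-bit target are assumptions, not from
// the original source): requesting a cast of an i32* value %p to i64 emits
// or reuses a single
//
//   %p.cast = ptrtoint i32* %p to i64
//
// and a later request to cast %p.cast back to i32* simply returns %p,
// because the ptrtoint<->inttoptr round trip is short-circuited above.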

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // If we haven't found this binop, insert it.
  Value *BO = Builder.CreateBinOp(Opcode, LHS, RHS, "tmp");
  rememberInstruction(BO);
  return BO;
}
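
// Example (illustrative; %a and %b are hypothetical i64 values): if the
// instruction just above the insertion point is
//
//   %tmp = add i64 %a, %b
//
// then InsertBinop(Instruction::Add, %a, %b) returns %tmp rather than
// emitting a duplicate add, as long as it appears within the six-instruction
// scan window above the insertion point.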

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const TargetData *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getIntegerSCEV(1, S->getType());
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                           FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                          FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With TargetData, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
          SmallVector<const SCEV *, 4> NewMulOps(MOperands.begin(),
                                                 MOperands.end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
              FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without TargetData, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getIntegerSCEV(0, SOp->getType());
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
          SmallVector<const SCEV *, 4> NewMulOps(MOperands.begin(),
                                                 MOperands.end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getIntegerSCEV(0, Step->getType());
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop());
    return true;
  }

  return false;
}
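
// Example (illustrative): factoring the constant 4 out of the addrec
// {8,+,12} yields {2,+,3} with a zero remainder, while factoring 4 out of
// {10,+,12} yields {2,+,3} and adds 2 to Remainder, since 10 sdiv 4 == 2
// and 10 srem 4 == 2.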

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                const Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getIntegerSCEV(0, Ty) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops = Add->getOperands();
  else {
    Ops.clear();
    if (!Sum->isZero())
      Ops.push_back(Sum);
  }
  // Then append the addrecs.
  Ops.insert(Ops.end(), AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         const Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getIntegerSCEV(0, Ty);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop()));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.insert(Ops.end(), AddRecs.begin(), AddRecs.end());
    // Re-sort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}
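
// Example (illustrative): given the operand list [{a+b,+,c}], SplitAddRecs
// rewrites it to [a, b, {0,+,c}], so the loop-invariant values a and b can
// be folded into GEP indices independently of the recurrence.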

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    const PointerType *PTy,
                                    const Type *Ty,
                                    Value *V) {
  const Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getIntegerSCEV(0, Ty);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of
            // operands we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (const StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With TargetData, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without TargetData, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            const Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (const ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
        Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, &CRHS, 1);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);
    return GEP;
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices.begin(),
                                 GepIndices.end(),
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);
  return expand(SE.getAddExpr(Ops));
}
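
// Example (illustrative; %p and %i are hypothetical, using the typed-pointer
// IR syntax of this LLVM version): expanding the pointer expression
// (%p + 4*%i), where %p has type i32*, produces
//
//   %scevgep = getelementptr i32* %p, i64 %i
//
// instead of the equivalent but opaque ptrtoint+add+inttoptr sequence,
// which alias analysis could not see through.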

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEV *F) {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(F);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  int NumOperands = S->getNumOperands();
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Find the index of an operand to start with. Choose the operand with
  // pointer type, if there is one, or the last operand otherwise.
  int PIdx = 0;
  for (; PIdx != NumOperands - 1; ++PIdx)
    if (isa<PointerType>(S->getOperand(PIdx)->getType())) break;

  // Expand code for the operand that we chose.
  Value *V = expand(S->getOperand(PIdx));

  // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
  // comments on expandAddToGEP for details.
  if (const PointerType *PTy = dyn_cast<PointerType>(V->getType())) {
    // Take the operand at PIdx out of the list.
    const SmallVectorImpl<const SCEV *> &Ops = S->getOperands();
    SmallVector<const SCEV *, 8> NewOps;
    NewOps.insert(NewOps.end(), Ops.begin(), Ops.begin() + PIdx);
    NewOps.insert(NewOps.end(), Ops.begin() + PIdx + 1, Ops.end());
    // Make a GEP.
    return expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, V);
  }

  // Otherwise, we'll expand the rest of the SCEVAddExpr as plain integer
  // arithmetic.
  V = InsertNoopCastOfTo(V, Ty);

  // Emit a bunch of add instructions.
  for (int i = NumOperands-1; i >= 0; --i) {
    if (i == PIdx) continue;
    const SCEV *Op = S->getOperand(i);
    if (isNonConstantNegative(Op)) {
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      V = InsertBinop(Instruction::Sub, V, W);
    } else {
      Value *W = expandCodeFor(Op, Ty);
      V = InsertBinop(Instruction::Add, V, W);
    }
  }
  return V;
}
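
// Example (illustrative; %a and %b are hypothetical i64 values): for the
// SCEV (%a + (-1 * %b)), the (-1 * %b) term, when it is not the operand
// chosen to start with, is recognized by isNonConstantNegative and folded
// in with a subtract:
//
//   %tmp = sub i64 %a, %b
//
// rather than materializing (-1 * %b) and adding it.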

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  int FirstOp = 0;  // Set if we should emit a subtract.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getOperand(0)))
    if (SC->getValue()->isAllOnesValue())
      FirstOp = 1;

  int i = S->getNumOperands()-2;
  Value *V = expandCodeFor(S->getOperand(i+1), Ty);

  // Emit a bunch of multiply instructions.
  for (; i >= FirstOp; --i) {
    Value *W = expandCodeFor(S->getOperand(i), Ty);
    V = InsertBinop(Instruction::Mul, V, W);
  }

  // -1 * ...  --->  0 - ...
  if (FirstOp == 1)
    V = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), V);
  return V;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// ExposePointerBase - Move parts of Base into Rest to leave Base with the
/// minimal expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getIntegerSCEV(0, A->getType()),
                                          A->getStepRecurrence(SE),
                                          A->getLoop()));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}
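
// Example (illustrative; %n is a hypothetical i64 value): expanding
// (%n /u 8) emits a shift rather than a divide, since 8 is a power of two:
//
//   %tmp = lshr i64 %n, 3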

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        const Type *ExpandTy,
                                        const Type *IntTy) {
  // Reuse a previously-inserted PHI, if present.
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I)
    if (SE.isSCEVable(PN->getType()) &&
        (SE.getEffectiveSCEVType(PN->getType()) ==
         SE.getEffectiveSCEVType(Normalized->getType())) &&
        SE.getSCEV(PN) == Normalized)
      if (BasicBlock *LatchBlock = L->getLoopLatch()) {
        // Remember this PHI, even in post-inc mode.
        InsertedValues.insert(PN);
        // Remember the increment.
        Instruction *IncV =
          cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock)
                              ->stripPointerCasts());
        rememberInstruction(IncV);
        // Make sure the increment is where we want it. But don't move it
        // down past a potential existing post-inc user.
        if (L == IVIncInsertLoop && !SE.DT->dominates(IncV, IVIncInsertPos))
          IncV->moveBefore(IVIncInsertPos);
        return PN;
      }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // Expand code for the step value. Insert instructions right before the
  // terminator corresponding to the back-edge. Do this before creating the PHI
  // so that PHI reuse code doesn't see an incomplete PHI. If the stride is
  // negative, insert a sub instead of an add for the increment (unless it's a
  // constant, because subtracts of constants are canonicalized to adds).
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  bool isPointer = isa<PointerType>(ExpandTy);
  bool isNegative = !isPointer && isNonConstantNegative(Step);
  if (isNegative)
    Step = SE.getNegativeSCEV(Step);
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  Builder.SetInsertPoint(L->getHeader(), L->getHeader()->begin());
  PHINode *PN = Builder.CreatePHI(ExpandTy, "lsr.iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  BasicBlock *Header = L->getHeader();
  for (pred_iterator HPI = pred_begin(Header), HPE = pred_end(Header);
       HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI. If IVIncInsertLoop is
    // non-null and equal to the addrec's loop, insert the instructions
    // at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos->getParent(), InsertPos);
    Value *IncV;
    // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
    if (isPointer) {
      const PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
      // If the step isn't constant, don't use an implicitly scaled GEP,
      // because that would require a multiply inside the loop.
      if (!isa<ConstantInt>(StepV))
        GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                    GEPPtrTy->getAddressSpace());
      const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
      IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
      if (IncV->getType() != PN->getType()) {
        IncV = Builder.CreateBitCast(IncV, PN->getType(), "tmp");
        rememberInstruction(IncV);
      }
    } else {
      IncV = isNegative ?
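// Example (illustrative; block and value names are hypothetical): for an
// integer addrec {%start,+,4} in a loop, this helper produces a header PHI
// and a latch increment of roughly this shape:
//
//   loop:
//     %lsr.iv = phi i64 [ %start, %preheader ], [ %lsr.iv.next, %latch ]
//     ...
//   latch:
//     %lsr.iv.next = add i64 %lsr.iv, 4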
        Builder.CreateSub(PN, StepV, "lsr.iv.next") :
        Builder.CreateAdd(PN, StepV, "lsr.iv.next");
      rememberInstruction(IncV);
    }
    PN->addIncoming(IncV, Pred);
  }

  // Restore the original insert point.
  if (SaveInsertBB)
    Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}
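
// Example (illustrative): in post-inc mode (L == PostIncLoop), the addrec
// below is first normalized by subtracting its step, the PHI is built for
// the normalized form, and the value returned is the latch's incoming
// (post-incremented) value rather than the PHI itself.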

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  const Type *STy = S->getType();
  const Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (L == PostIncLoop) {
    const SCEV *Step = S->getStepRecurrence(SE);
    Normalized = cast<SCEVAddRecExpr>(SE.getMinusSCEV(S, Step));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!Start->properlyDominates(L->getHeader(), SE.DT)) {
    PostLoopOffset = Start;
    Start = SE.getIntegerSCEV(0, Normalized->getType());
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start,
                                            Normalized->getStepRecurrence(SE),
                                            Normalized->getLoop()));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!Step->hasComputableLoopEvolution(L) &&
      !Step->dominates(L->getHeader(), SE.DT)) {
    PostLoopScale = Step;
    Step = SE.getIntegerSCEV(1, Normalized->getType());
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
                                            Normalized->getLoop()));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  const Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (L != PostIncLoop)
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (const PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.isSCEVable(PN->getType()) &&
        isa<IntegerType>(SE.getEffectiveSCEVType(PN->getType())) &&
        SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    const SmallVectorImpl<const SCEV *> &Ops = S->getOperands();
    SmallVector<const SCEV *, 4> NewOps(Ops.size());
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(Ops[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop()));
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt)) ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    const SmallVectorImpl<const SCEV *> &SOperands = S->getOperands();
    SmallVector<const SCEV *, 4> NewOps(SOperands.begin(), SOperands.end());
    NewOps[0] = SE.getIntegerSCEV(0, Ty);
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (const PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() &&
      S->getOperand(1) == SE.getIntegerSCEV(1, Ty)) {
    // If there's a canonical IV, just use it.
    if (CanonicalIV) {
      assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
             "IVs with types different from the canonical IV should "
             "already have been handled!");
      return CanonicalIV;
    }

    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    PHINode *PN = PHINode::Create(Ty, "indvar", Header->begin());
    rememberInstruction(PN);

    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = pred_begin(Header), HPE = pred_end(Header);
         HPI != HPE; ++HPI)
      if (L->contains(*HPI)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(PN, One, "indvar.next",
                                                     (*HPI)->getTerminator());
        rememberInstruction(Add);
        PN->addIncoming(Add, *HPI);
      } else {
        PN->addIncoming(Constant::getNullValue(Ty), *HPI);
      }
  }

  // {0,+,F} --> {0,+,1} * F
  // Get the canonical induction variable I for this loop.
  Value *I = CanonicalIV ?
             CanonicalIV :
             getOrInsertCanonicalInductionVariable(L, Ty);

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(I),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            I->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(I);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, I->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}
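
// Example (illustrative; %indvar and %stride are hypothetical): in
// canonical mode, the affine addrec {0,+,%stride} is expanded as a multiply
// of the canonical induction variable:
//
//   %tmp = mul i64 %indvar, %stride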

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty, "tmp");
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty, "tmp");
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty, "tmp");
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  const Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS, "tmp");
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
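  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

// Example (illustrative; %a and %b are hypothetical i64 values): a
// two-operand smax(%a, %b) expands to a compare and select:
//
//   %tmp = icmp sgt i64 %a, %b
//   %smax = select i1 %tmp, i64 %a, i64 %b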

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  const Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS, "tmp");
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (S->isLoopInvariant(L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && S->hasComputableLoopEvolution(L))
        InsertPt = L->getHeader()->getFirstNonPHI();
      while (isInsertedInstruction(InsertPt))
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  if (!PostIncLoop)
    InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}
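
// Example (illustrative): if the current insertion point is inside a loop
// body but the requested SCEV is invariant in that loop, the insertion-point
// search above hoists the expansion to the terminator of the outermost
// enclosing preheader in which the expression is still invariant, so the
// value is computed once outside the loop.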

void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoop)
    InsertedValues.insert(I);

  // If we just claimed an existing instruction and that instruction had
  // been the insert point, adjust the insert point forward so that
  // subsequently inserted code will be dominated.
  if (Builder.GetInsertPoint() == I) {
    BasicBlock::iterator It = cast<Instruction>(I);
    do { ++It; } while (isInsertedInstruction(It));
    Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
  }
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
Value *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    const Type *Ty) {
  assert(Ty->isInteger() && "Can only insert integer induction variables!");
  const SCEV *H = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty),
                                   SE.getIntegerSCEV(1, Ty), L);
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Value *V = expandCodeFor(H, 0, L->getHeader()->begin());
  if (SaveInsertBB)
    Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}