ScalarEvolutionExpander.cpp revision 409443b1c6415e55c2bd4f0662e14cbc52d16686
//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = NULL;

  // Check to see if there is already a cast!
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }
  }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT->dominates(Ret, BIP));

  rememberInstruction(Ret);
  return Ret;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
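/// For example, on a target where pointers and i64 have the same size,
/// reinterpreting an i8* value as i64 takes a single ptrtoint; an existing
/// suitable cast is reused (or moved) rather than duplicated.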
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Otherwise cast the instruction, inserting the cast immediately after it.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
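      // (Bumping ScanLimit here cancels this iteration's decrement, so a
      // debug intrinsic never consumes any of the scan budget.)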
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(SaveInsertPt->getDebugLoc());
  rememberInstruction(BO);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const DataLayout *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                           FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                          FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With DataLayout, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
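      // For example, factoring 4 out of (8 * %len) yields (2 * %len).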
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                             FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without DataLayout, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    // FIXME: can use A->getNoWrapFlags(FlagNW)
    S = SE.getAddRecExpr(Start, Step, A->getLoop(), SCEV::FlagAnyWrap);
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. Any
/// SCEVAddRecExprs present are expected to be at the end of the list, and
/// they are kept there.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         // FIXME: A->getNoWrapFlags(FlagNW)
                                         SCEV::FlagAnyWrap));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Re-sort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
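    // For example, with i32 elements (size 4), the offset {8,+,4} factors
    // into the index {2,+,1}; any remainder is kept in NewOps and retried
    // at the next level of the type.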
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of
            // operands we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With DataLayout, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without DataLayout, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
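    // The expansion then has the shape %uglygep = getelementptr i8* %V,
    // i64 %Idx (illustrative IR; the value names are not significant).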
    V = InsertNoopCastOfTo(V,
        Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing
        // the generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    // Restore the original insert point.
    if (SaveInsertBB)
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant
/// for SCEV expansion. If they are nested, this is the most nested. If they
/// are neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return 0;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return 0;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = 0;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the front, so that they can serve as
    // GEP bases in visitAddExpr.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies
  // on to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not an instruction, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          // FIXME: A->getNoWrapFlags(FlagNW)
                                          SCEV::FlagAnyWrap));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
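  // For example, an increment (add %phi, %step) is only reusable here if
  // %step (and any other non-phi operand) already dominates IVIncInsertPos.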
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
         OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV != PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the non-IV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of
/// the simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return NULL;

  switch (IncV->getOpcode()) {
  default:
    return NULL;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT->dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return NULL;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT->dominates(OInst, InsertPos))
          return NULL;
      }
      if (allowScale) {
        // Allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is
      // already handled, or some number of address-size elements (ugly).
      // Ugly geps have 2 operands. i1* is used by the expander to represent
      // an address-size element.
      if (IncV->getNumOperands() != 2)
        return NULL;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return NULL;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to
/// make it available to other uses in this loop. Recursively hoist any
/// operands, until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT->dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos)
      || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
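    // Record it and continue down the chain of IV operands; the whole chain
    // is moved below, in reverse order, once a dominating value is reached.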
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT->dominates(IncV, InsertPos))
      break;
  }
  for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
       E = IVIncs.rend(); I != E; ++I) {
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for (Instruction *IVOper = IncV;
       (IVOper = getIVIncOperand(IVOper,
                                 L->getLoopPreheader()->getTerminator(),
                                 /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
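  // Scan the header phis for one whose SCEV matches the normalized addrec
  // and whose increment chain is in an acceptable form for the current mode.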
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()) ||
          (SE.getEffectiveSCEVType(PN->getType()) !=
           SE.getEffectiveSCEVType(Normalized->getType())) ||
          SE.getSCEV(PN) != Normalized)
        continue;

      Instruction *IncV =
        cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(IncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop)
          do {
            if (SE.DT->dominates(IncV, IVIncInsertPos))
              break;
            // Make sure the increment is where we want it. But don't move it
            // down past a potential existing post-inc user.
            IncV->moveBefore(IVIncInsertPos);
            IVIncInsertPos = IncV;
            IncV = cast<Instruction>(IncV->getOperand(0));
          } while (IncV != PN);
      }
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(PN);
      // Remember the increment.
      rememberInstruction(IncV);
      return PN;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that
  // PHI reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the
  // increment (unless it's a constant, because subtracts of constants are
  // canonicalized to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert
    // the instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (Normalized->getNoWrapFlags(SCEV::FlagNUW))
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (Normalized->getNoWrapFlags(SCEV::FlagNSW))
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  // After expanding subexpressions, restore the PostIncLoops set so the
  // caller can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
                                                  Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       // FIXME: Normalized->getNoWrapFlags(FlagNW)
                       SCEV::FlagAnyWrap));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
                                            Normalized->getLoop(),
                                            // FIXME: Normalized
                                            // ->getNoWrapFlags(FlagNW)
                                            SCEV::FlagAnyWrap));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result)
        && !SE.DT->dominates(cast<Instruction>(Result),
                             Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside the loop whose operand is replaced
      // during expansion with the value of the postinc user. Without
      // fundamentally changing the way postinc users are tracked, the only
      // remedy is inserting an extra IV increment. StepV might fold into
      // PostLoopOffset, but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      // Expand the step somewhere that dominates the loop header.
      BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
      BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
      Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
      // Restore the insertion point to the place where the caller has
      // determined dominates all uses.
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
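  // For example, a 32-bit {0,+,1} in a loop with a 64-bit canonical IV is
  // any-extended to 64 bits, expanded there, and truncated back to 32 bits.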
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       // FIXME: S->getNoWrapFlags(FlagNW)
                                       SCEV::FlagAnyWrap));
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    // FIXME: can use S->getNoWrapFlags()
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L, SCEV::FlagAnyWrap);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}
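// Illustrative usage sketch; the client code below is an assumption, not part
// of this file. A pass typically materializes a SCEV at a concrete point with
// the three-argument overload above, guarding with isSafeToExpand() (defined
// at the end of this file):
//
//   if (isSafeToExpand(S)) {
//     Value *V = Expander.expandCodeFor(S, S->getType(), InsertPt);
//     ...
//   }
//
// When Ty is non-null it must be the same size as S's type; non-trivial
// conversions belong on the SCEV side (e.g. SE.getTruncateExpr) before
// expansion.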
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (InsertPt != Builder.GetInsertPoint()
             && (isInsertedInstruction(InsertPt)
                 || isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      }
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >::iterator
    I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  restoreInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}

void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
  Builder.SetInsertPoint(BB, I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return V;
}
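// Illustrative usage sketch (assumed client code, not from this file):
//
//   SCEVExpander Expander(SE, "indvars");
//   PHINode *IV = Expander.getOrInsertCanonicalInductionVariable(L, Ty);
//
// Because the {0,+,1}<L> addrec is routed through expandCodeFor, an existing
// canonical IV of the same effective type is returned instead of inserting a
// duplicate phi.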
/// Sort values by integer width for replaceCongruentIVs.
static bool width_descending(Value *lhs, Value *rhs) {
  // Put pointers at the back and make sure pointer < pointer = false.
  if (!lhs->getType()->isIntegerTy() || !rhs->getType()->isIntegerTy())
    return rhs->getType()->isIntegerTy() && !lhs->getType()->isIntegerTy();
  return rhs->getType()->getPrimitiveSizeInBits()
    < lhs->getType()->getPrimitiveSizeInBits();
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetTransformInfo *TTI) {
  // Collect the integer phis and, if a TTI is available, sort them by
  // decreasing width.
  SmallVector<PHINode*, 8> Phis;
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
    Phis.push_back(Phi);
  }
  if (TTI)
    std::sort(Phis.begin(), Phis.end(), width_descending);

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow, mapping wide phis to their truncation
  // so that narrow phis can reuse them.
  for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
       PEnd = Phis.end(); PIter != PEnd; ++PIter) {
    PHINode *Phi = *PIter;

    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = Phi->hasConstantValue()) {
      Phi->replaceAllUsesWith(V);
      DeadInsts.push_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI
          && TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi, or vice versa, doesn't
    // make sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi has the same width but is more canonical, replace the
      // original with it. As part of the "more canonical" determination,
      // respect a prior decision to use an IV chain.
      if (OrigPhiRef->getType() == Phi->getType()
          && !(ChainedPhis.count(Phi)
               || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
          && (ChainedPhis.count(Phi)
              || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination (CSE/GVN) should handle the rest. However, once SCEV
      // proves that a phi is congruent, it's often the head of an IV user
      // cycle that is isomorphic with the original phi. It's worth eagerly
      // cleaning up the common case of a single IV increment so that
      // DeleteDeadPHIs can remove cycles that had postinc uses.
      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
                                                   IsomorphicInc->getType());
      if (OrigInc != IsomorphicInc
          && TruncExpr == SE.getSCEV(IsomorphicInc)
          && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
              || hoistIVInc(OrigInc, IsomorphicInc))) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        Value *NewInc = OrigInc;
        if (OrigInc->getType() != IsomorphicInc->getType()) {
          Instruction *IP = isa<PHINode>(OrigInc)
            ? (Instruction*)L->getHeader()->getFirstInsertionPt()
            : OrigInc->getNextNode();
          IRBuilder<> Builder(IP);
          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.
            CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
        }
        IsomorphicInc->replaceAllUsesWith(NewInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
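    // Illustrative sketch (hypothetical IR, not from the original source):
    // assuming the target reports the i64-to-i32 truncation as free, two
    // congruent IVs such as
    //   %iv.wide   = phi i64 [ 0, %ph ], [ %iv.wide.next, %latch ]
    //   %iv.narrow = phi i32 [ 0, %ph ], [ %iv.narrow.next, %latch ]
    // collapse below to %iv.wide alone, with %iv.narrow's uses rewritten to
    // a single trunc of %iv.wide inserted at the header.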
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}

namespace {
// Search for a SCEV subexpression that is not safe to expand. Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check
// is only needed when the expression includes some subexpression that is not
// IV derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
struct SCEVFindUnsafe {
  bool IsUnsafe;

  SCEVFindUnsafe(): IsUnsafe(false) {}

  bool follow(const SCEV *S) {
    const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S);
    if (!D)
      return true;
    const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
    if (SC && !SC->getValue()->isZero())
      return true;
    IsUnsafe = true;
    return false;
  }
  bool isDone() const { return IsUnsafe; }
};
}

namespace llvm {
bool isSafeToExpand(const SCEV *S) {
  SCEVFindUnsafe Search;
  visitAll(S, Search);
  return !Search.IsUnsafe;
}
}
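// Illustrative behavior sketch for isSafeToExpand (A and NVal below are
// hypothetical, constructed only for the example; A is some non-constant
// SCEV):
//
//   isSafeToExpand(SE.getUDivExpr(A, SE.getConstant(Ty, 4)));  // true
//   isSafeToExpand(SE.getUDivExpr(A, SE.getUnknown(NVal)));    // false
//
// The second call is rejected because the denominator is not a provably
// nonzero constant, so expanding it could introduce a speculative divide by
// zero.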