ScalarEvolutionExpander.cpp revision 705b48d960dff1a96ac40d0cf932eb465b9a550a
//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // All new or reused instructions must strictly dominate their uses.
  // It would be nice to assert this here, but we don't always know where
  // the next instructions will be added as the caller can move the
  // Builder's InsertPt before creating them and we might be called with
  // an invalid InsertPt.

  // Check to see if there is already a cast!
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, fix it.
          if (BasicBlock::iterator(CI) != IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
            NewCI->takeName(CI);
            CI->replaceAllUsesWith(NewCI);
            CI->setOperand(0, UndefValue::get(V->getType()));
            rememberInstruction(NewCI);
            return NewCI;
          }
          rememberInstruction(CI);
          return CI;
        }
  }

  // Create a new cast.
  Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
  rememberInstruction(I);
  return I;
}

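// Illustrative sketch (not part of the original file; names are hypothetical):
// callers such as InsertNoopCastOfTo below funnel every no-op cast through
// this helper, so repeated requests like
//
//   Value *A = Exp.InsertNoopCastOfTo(V, Int64Ty); // creates a ptrtoint
//   Value *B = Exp.InsertNoopCastOfTo(V, Int64Ty); // reuses the same cast
//
// yield a single cast instruction at IP rather than one per request; a
// suitable cast found at the wrong position is replaced by a fresh one at IP,
// with the old cast left behind (operand cleared to undef) in case a caller
// is using it as an insert point.
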
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(SaveInsertPt->getDebugLoc());
  rememberInstruction(BO);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return BO;
}

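// A minimal sketch of the reuse behavior above (hypothetical values, not from
// the original file): if the current block already ends with
//
//   %sum = add i64 %a, %b
//
// then a request for InsertBinop(Instruction::Add, %a, %b) within the
// six-instruction scan window returns %sum instead of emitting a second add;
// two constant operands never reach the scan at all, since they are folded
// directly via ConstantExpr::get.
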
/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const TargetData *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                                                  FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                                                  FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With TargetData, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                                                  FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without TargetData, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    // FIXME: can use A->getNoWrapFlags(FlagNW)
    S = SE.getAddRecExpr(Start, Step, A->getLoop(), SCEV::FlagAnyWrap);
    return true;
  }

  return false;
}

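// Worked example for the routine above (illustrative, not from the original
// file): factoring Factor = 4 out of S = {8,+,12}<L> divides the start and
// step separately, yielding S = {2,+,3}<L> with a zero remainder; factoring
// 4 out of S = 10 instead produces S = 2 with Remainder = 2, which a caller
// like expandAddToGEP folds back into the remaining (unscaled) operand list.
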
/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         // FIXME: A->getNoWrapFlags(FlagNW)
                                         SCEV::FlagAnyWrap));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Re-sort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

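// Example of the flattening above (illustrative): given the operand list
// [ {a+b,+,c}<L> ], SplitAddRecs rewrites it to [ a, b, {0,+,c}<L> ] and then
// re-sorts via SimplifyAddOperands, so the loop-invariant parts a and b can
// be folded into GEP indices independently of the recurrence.
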
/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of
            // operands we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With TargetData, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without TargetData, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
               STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
                Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT->properlyDominates(cast<Instruction>(V),
                                    Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    // Restore the original insert point.
    if (SaveInsertBB)
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

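// Illustrative result of the expansion above (hypothetical IR, not from the
// original file): for a base %p of type [10 x i32]* and a byte offset
// expression 4*%i + 8, the element size 4 is factored out, so the structured
// path emits a "pretty" GEP whose index holds %i + 2:
//
//   %scevgep = getelementptr [10 x i32]* %p, i64 0, i64 %idx
//
// whereas an offset that cannot be factored falls back to casting %p to i8*
// and emitting a single-index "uglygep" with the raw byte offset.
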
/// PickMostRelevantLoop - Given two loops pick the one that's most relevant
/// for SCEV expansion. If they are nested, this is the most nested. If they
/// are neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return 0;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return 0;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = 0;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

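// Ordering sketch (illustrative): under this comparator, pointer operands are
// expanded before integer ones (visitAddExpr below relies on this to form a
// GEP from the pointer base), operands whose relevant loop is less nested
// sort before operands of inner loops so partial sums can be hoisted level by
// level, and among operands of the same loop the stable sort preserves the
// caller's reversed order, leaving constants last and pushing non-constant
// negatives to the right so a subtract can be used.
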
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies
  // on to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown and not an instruction, peek
        // through it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is an instruction,
      // use a SCEVUnknown to avoid re-analyzing it.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

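// A small sketch of the result (hypothetical IR, not from the original
// file): expanding %p + 4*%i + 8 over an i8* base groups the integer
// operands, expands them once, and routes the sum through expandAddToGEP
// rather than emitting a ptrtoint/add/inttoptr sequence:
//
//   %idx     = ... expansion of 4*%i + 8 ...
//   %scevgep = getelementptr i8* %p, i64 %idx
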
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          // FIXME: A->getNoWrapFlags(FlagNW)
                                          SCEV::FlagAnyWrap));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
         OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV != PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of
/// the simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return NULL;

  switch (IncV->getOpcode()) {
  default:
    return NULL;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT->properlyDominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return NULL;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT->properlyDominates(OInst, InsertPos))
          return NULL;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is
      // already handled, or some number of address-size elements (ugly).
      // Ugly geps have 2 operands. i1* is used by the expander to represent
      // an address-size element.
      if (IncV->getNumOperands() != 2)
        return NULL;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return NULL;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to
/// make it available to other uses in this loop. Recursively hoist any
/// operands, until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT->properlyDominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (!SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT->properlyDominates(IncV, InsertPos))
      break;
  }
  for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
       E = IVIncs.rend(); I != E; ++I) {
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

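// Illustrative chain walk (hypothetical names, not from the original file):
// for a GEP-based increment
//
//   %iv.next = getelementptr i8* %iv, i64 %step
//
// getIVIncOperand returns %iv, and hoistIVInc walks %iv.next -> %iv back
// toward the PHI, collecting the increments and then moving them above
// InsertPos in reverse order so that each moved instruction still dominates
// its users.
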
/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for (Instruction *IVOper = IncV;
       (IVOper = getIVIncOperand(IVOper,
                                 L->getLoopPreheader()->getTerminator(),
                                 /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()) ||
          (SE.getEffectiveSCEVType(PN->getType()) !=
           SE.getEffectiveSCEVType(Normalized->getType())) ||
          SE.getSCEV(PN) != Normalized)
        continue;

      Instruction *IncV =
        cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(IncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop)
          do {
            if (SE.DT->dominates(IncV, IVIncInsertPos))
              break;
            // Make sure the increment is where we want it. But don't move it
            // down past a potential existing post-inc user.
            IncV->moveBefore(IVIncInsertPos);
            IVIncInsertPos = IncV;
            IncV = cast<Instruction>(IncV->getOperand(0));
          } while (IncV != PN);
      }
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(PN);
      // Remember the increment.
      rememberInstruction(IncV);
      return PN;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that
  // PHI reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the
  // increment (unless it's a constant, because subtracts of constants are
  // canonicalized to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert
    // the instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    PN->addIncoming(IncV, Pred);
  }

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  // After expanding subexpressions, restore the PostIncLoops set so the
  // caller can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
                                                  Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       // FIXME: Normalized->getNoWrapFlags(FlagNW)
                       SCEV::FlagAnyWrap));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
                                            Normalized->getLoop(),
                                            // FIXME: Normalized
                                            // ->getNoWrapFlags(FlagNW)
                                            SCEV::FlagAnyWrap));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result)
        && !SE.DT->dominates(cast<Instruction>(Result),
                             Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside the loop whose operand is replaced
      // during expansion with the value of the postinc user. Without
      // fundamentally changing the way postinc users are tracked, the only
      // remedy is inserting an extra IV increment. StepV might fold into
      // PostLoopOffset, but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      // Expand the step somewhere that dominates the loop header.
      BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
      BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
      Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
      // Restore the insertion point to the place where the caller has
      // determined dominates all uses.
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       // FIXME: S->getNoWrapFlags(FlagNW)
                                       SCEV::FlagAnyWrap));
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    // FIXME: can use S->getNoWrapFlags()
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L, SCEV::FlagAnyWrap);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

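// Expansion sketch for the affine case above (illustrative, not from the
// original file): with the canonical IV %indvar for L, the recurrence
// {0,+,F}<L> becomes the product %indvar * F, so e.g. {0,+,4}<L> expands to
//
//   %tmp = mul i64 %indvar, 4
//
// while a non-zero start X is peeled off earlier as X + {0,+,F}<L>, and
// higher-order recurrences are evaluated symbolically at iteration %indvar
// via evaluateAtIteration.
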
Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (InsertPt != Builder.GetInsertPoint()
             && (isInsertedInstruction(InsertPt)
                 || isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      }
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non-postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  restoreInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}

void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
  Builder.SetInsertPoint(BB, I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
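  // For a loop with a preheader and a single latch, the expansion below
  // produces IR of roughly this shape (block and value names illustrative):
  //
  //   header:
  //     %indvar = phi i32 [ 0, %preheader ], [ %indvar.next, %latch ]
  //     ...
  //   latch:
  //     %indvar.next = add i32 %indvar, 1
  //     br label %header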
  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return V;
}

/// Sort values by integer width for replaceCongruentIVs.
static bool width_descending(Value *lhs, Value *rhs) {
  // Put pointers at the back and make sure pointer < pointer = false.
  if (!lhs->getType()->isIntegerTy() || !rhs->getType()->isIntegerTy())
    return rhs->getType()->isIntegerTy() && !lhs->getType()->isIntegerTy();
  return rhs->getType()->getPrimitiveSizeInBits()
       < lhs->getType()->getPrimitiveSizeInBits();
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetLowering *TLI) {
  // Collect the integer phis, then sort them in order of decreasing width.
  SmallVector<PHINode*, 8> Phis;
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
    Phis.push_back(Phi);
  }
  if (TLI)
    std::sort(Phis.begin(), Phis.end(), width_descending);

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow. Map wide phis to their truncation
  // so narrow phis can reuse them.
  for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
         PEnd = Phis.end(); PIter != PEnd; ++PIter) {
    PHINode *Phi = *PIter;

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TLI
          && TLI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
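      // As an illustration (IR names hypothetical), these two phis are
      // congruent, both having the SCEV {0,+,1}<%loop>:
      //   %iv  = phi i64 [ 0, %ph ], [ %iv.next,  %latch ]
      //   %idx = phi i64 [ 0, %ph ], [ %idx.next, %latch ]
      // where %iv.next = add i64 %iv, 1 and %idx.next = add i64 %idx, 1.
      // One phi replaces the other, and the now-dead increment cycle is
      // queued in DeadInsts for the caller to delete.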
      // If this phi has the same width but is more canonical, replace the
      // original with it. As part of the "more canonical" determination,
      // respect a prior decision to use an IV chain.
      if (OrigPhiRef->getType() == Phi->getType()
          && !(ChainedPhis.count(Phi)
               || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
          && (ChainedPhis.count(Phi)
              || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. It's worth eagerly cleaning up the
      // common case of a single IV increment so that DeleteDeadPHIs can remove
      // cycles that had postinc uses.
      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
                                                   IsomorphicInc->getType());
      if (OrigInc != IsomorphicInc
          && TruncExpr == SE.getSCEV(IsomorphicInc)
          && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
              || hoistIVInc(OrigInc, IsomorphicInc))) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        Value *NewInc = OrigInc;
        if (OrigInc->getType() != IsomorphicInc->getType()) {
          Instruction *IP = isa<PHINode>(OrigInc)
            ? (Instruction*)L->getHeader()->getFirstInsertionPt()
            : OrigInc->getNextNode();
          IRBuilder<> Builder(IP);
          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.
            CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
        }
        IsomorphicInc->replaceAllUsesWith(NewInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}
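
// A minimal usage sketch for replaceCongruentIVs (assuming SE, L, DT, and TLI
// are in scope as in a typical loop pass; this mirrors existing callers but is
// illustrative, not prescriptive):
//
//   SCEVExpander Rewriter(SE, "indvars");
//   SmallVector<WeakVH, 16> DeadInsts;
//   unsigned NumElim = Rewriter.replaceCongruentIVs(L, &DT, DeadInsts, TLI);
//   while (!DeadInsts.empty())
//     if (Instruction *Inst =
//           dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
//       RecursivelyDeleteTriviallyDeadInstructions(Inst);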