ScalarEvolutionExpander.cpp revision f8fd841a4bb7a59f81cf4642169e8251e039acfe
//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // Check to see if there is already a cast!
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, fix it.
          if (BasicBlock::iterator(CI) != IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
            NewCI->takeName(CI);
            CI->replaceAllUsesWith(NewCI);
            CI->setOperand(0, UndefValue::get(V->getType()));
            rememberInstruction(NewCI);
            return NewCI;
          }
          rememberInstruction(CI);
          return CI;
        }
  }

  // Create a new cast.
  Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
  rememberInstruction(I);
  return I;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
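  // (e.g. for V = inttoptr (ptrtoint %p), return %p directly; the size
  // checks below ensure the round-trip is a no-op)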
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast an instruction's result immediately after the instruction itself.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<DbgInfoIntrinsic>(IP) ||
         isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(SaveInsertPt->getDebugLoc());
  rememberInstruction(BO);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const TargetData *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                           FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                          FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With TargetData, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                             FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without TargetData, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
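  // (illustrative example: factoring 4 out of {8,+,12} yields {2,+,3} with a
  // zero remainder)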
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    // FIXME: can use A->getNoWrapFlags(FlagNW)
    S = SE.getAddRecExpr(Start, Step, A->getLoop(), SCEV::FlagAnyWrap);
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         // FIXME: A->getNoWrapFlags(FlagNW)
                                         SCEV::FlagAnyWrap));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
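///
/// As an illustrative example: for an i32* base %p and an offset expression
/// 4*%i (assuming a 4-byte i32), the goal is to emit
///   %scevgep = getelementptr i32* %p, i64 %i
/// rather than a ptrtoint/add/inttoptr sequence.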
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
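    // (e.g. with TargetData, a constant offset of 4 into { i32, i32 } selects
    // field #1, leaving a remaining offset of 0)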
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With TargetData, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without TargetData, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
            Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
                           Type::getInt8PtrTy(Ty->getContext(),
                                              PTy->getAddressSpace()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    // Restore the original insert point.
    if (SaveInsertBB)
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
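    // (constants can be materialized at any insertion point)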
    return 0;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return 0;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = 0;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
  return 0;
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown of a non-instruction value, peek
        // through it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
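      // (i.e. emit mul %x, 5 rather than mul 5, %x)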
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          // FIXME: A->getNoWrapFlags(FlagNW)
                                          SCEV::FlagAnyWrap));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
         OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  // Keep following the chain until we arrive back at the PHI.
  if (IncV != PN)
    return isNormalAddRecExprPHI(PN, IncV, L);

  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  switch (IncV->getOpcode()) {
  // Check for a simple Add/Sub or GEP of a loop invariant step.
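  // (e.g. %iv.next = add %iv, %step, or a GEP stepping %iv by a
  // loop-invariant offset)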
  case Instruction::Add:
  case Instruction::Sub:
    return IncV->getOperand(0) == PN
      && L->isLoopInvariant(IncV->getOperand(1));
  case Instruction::BitCast:
    IncV = dyn_cast<GetElementPtrInst>(IncV->getOperand(0));
    if (!IncV)
      return false;
    // fall-thru to GEP handling
  case Instruction::GetElementPtr: {
    // This must be a pointer addition of constants (pretty) or some number of
    // address-size elements (ugly).
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      // ugly geps have 2 operands.
      // i1* is used by the expander to represent an address-size element.
      if (IncV->getNumOperands() != 2)
        return false;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return false;
      // Ensure the operands dominate the insertion point. I don't know of a
      // case when this would not be true, so this is somewhat untested.
      if (L == IVIncInsertLoop) {
        for (User::op_iterator OI = IncV->op_begin()+1,
             OE = IncV->op_end(); OI != OE; ++OI)
          if (Instruction *OInst = dyn_cast<Instruction>(OI))
            if (!SE.DT->dominates(OInst, IVIncInsertPos))
              return false;
      }
      break;
    }
    IncV = dyn_cast<Instruction>(IncV->getOperand(0));
    if (IncV && IncV->getOpcode() == Instruction::BitCast)
      IncV = dyn_cast<Instruction>(IncV->getOperand(0));
    return IncV == PN;
  }
  default:
    return false;
  }
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
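  // (i.e. a header phi whose SCEV matches Normalized and whose latch
  // increment is in a recognizable form, as checked below)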
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()) ||
          (SE.getEffectiveSCEVType(PN->getType()) !=
           SE.getEffectiveSCEVType(Normalized->getType())) ||
          SE.getSCEV(PN) != Normalized)
        continue;

      Instruction *IncV =
        cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, IncV, L))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, IncV, L))
          continue;
      }
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(PN);
      // Remember the increment.
      rememberInstruction(IncV);
      if (L == IVIncInsertLoop)
        do {
          if (SE.DT->dominates(IncV, IVIncInsertPos))
            break;
          // Make sure the increment is where we want it. But don't move it
          // down past a potential existing post-inc user.
          IncV->moveBefore(IVIncInsertPos);
          IVIncInsertPos = IncV;
          IncV = cast<Instruction>(IncV->getOperand(0));
        } while (IncV != PN);
      return PN;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that
  // PHI reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the
  // increment (unless it's a constant, because subtracts of constants are
  // canonicalized to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
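  // (each in-loop predecessor edge gets an increment; each edge from outside
  // the loop gets StartV)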
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    PN->addIncoming(IncV, Pred);
  }

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
                                                  Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       // FIXME: Normalized->getNoWrapFlags(FlagNW)
                       SCEV::FlagAnyWrap));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
                                            Normalized->getLoop(),
                                            // FIXME: Normalized
                                            // ->getNoWrapFlags(FlagNW)
                                            SCEV::FlagAnyWrap));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result)
        && !SE.DT->dominates(cast<Instruction>(Result),
                             Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside the loop whose operand is replaced
      // during expansion with the value of the postinc user. Without
      // fundamentally changing the way postinc users are tracked, the only
      // remedy is inserting an extra IV increment. StepV might fold into
      // PostLoopOffset, but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      // Expand the step somewhere that dominates the loop header.
      BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
      BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
      Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
      // Restore the insertion point to the place where the caller has
      // determined dominates all uses.
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
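  // (e.g. expand a 32-bit addrec in terms of an existing 64-bit canonical IV,
  // then truncate the result back to 32 bits)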
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       // FIXME: S->getNoWrapFlags(FlagNW)
                                       SCEV::FlagAnyWrap));
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    // FIXME: can use S->getNoWrapFlags()
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L, SCEV::FlagAnyWrap);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
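  // (at this point CanonicalIV exists and computes {0,+,1}; wider IVs were
  // handled by the rewrite-and-truncate path above)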
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *I) {
  BasicBlock::iterator IP = I;
  while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
    ++IP;
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (isInsertedInstruction(InsertPt) || isa<DbgInfoIntrinsic>(InsertPt))
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  restoreInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);

  // If we just claimed an existing instruction and that instruction had
  // been the insert point, adjust the insert point forward so that
  // subsequently inserted code will be dominated.
  if (Builder.GetInsertPoint() == I) {
    BasicBlock::iterator It = cast<Instruction>(I);
    do { ++It; } while (isInsertedInstruction(It) ||
                        isa<DbgInfoIntrinsic>(It));
    Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
  }
}

void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
  // If we acquired more instructions since the old insert point was saved,
  // advance past them.
  while (isInsertedInstruction(I) || isa<DbgInfoIntrinsic>(I)) ++I;

  Builder.SetInsertPoint(BB, I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return V;
}

/// hoistStep - Attempt to hoist an IV increment above a potential use.
///
/// To successfully hoist, two criteria must be met:
/// - IncV operands dominate InsertPos and
/// - InsertPos dominates IncV
///
/// Meeting the second condition means that we don't need to check all of
/// IncV's existing uses (it's moving up in the domtree).
///
/// This does not yet recursively hoist the operands, although that would
/// not be difficult.
///
/// This does not require a SCEVExpander instance and could be replaced by a
/// general code-insertion helper.
bool SCEVExpander::hoistStep(Instruction *IncV, Instruction *InsertPos,
                             const DominatorTree *DT) {
  if (DT->dominates(IncV, InsertPos))
    return true;

  if (!DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  // Attempt to hoist IncV
  for (User::op_iterator OI = IncV->op_begin(), OE = IncV->op_end();
       OI != OE; ++OI) {
    Instruction *OInst = dyn_cast<Instruction>(OI);
    if (OInst && (OInst == InsertPos || !DT->dominates(OInst, InsertPos)))
      return false;
  }
  IncV->moveBefore(InsertPos);
  return true;
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts) {
  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
    PHINode *Phi = cast<PHINode>(I);
    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      continue;
    }

    // If one phi derives from the other via GEPs, types may differ.
    // We could consider adding a bitcast here to handle it.
    if (OrigPhiRef->getType() != Phi->getType())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi is more canonical, swap it with the original.
      if (!isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)
          && isExpandedAddRecExprPHI(Phi, IsomorphicInc, L)) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. So it's worth eagerly cleaning up
      // the common case of a single IV increment.
      if (OrigInc != IsomorphicInc &&
          OrigInc->getType() == IsomorphicInc->getType() &&
          SE.getSCEV(OrigInc) == SE.getSCEV(IsomorphicInc) &&
          hoistStep(OrigInc, IsomorphicInc, DT)) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        IsomorphicInc->replaceAllUsesWith(OrigInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Phi->replaceAllUsesWith(OrigPhiRef);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}