ScalarEvolutionExpander.cpp revision 5b04cfb78514cf4dbed4b989a3b07310a91df7f7
//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to assert that we can produce a cast that will
  // dominate all its uses. In particular, this is crucial for the case
  // where the builder's insertion point *is* the point where we were asked
  // to put the cast.
  // Since we don't know whether the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  // FIXME: enable once our implementation of dominates is fixed.
  assert(BIP == IP || SE.DT->dominates(IP, BIP));

  // Check to see if there is already a cast!
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
            NewCI->takeName(CI);
            CI->replaceAllUsesWith(NewCI);
            CI->setOperand(0, UndefValue::get(V->getType()));
            rememberInstruction(NewCI);
            return NewCI;
          }
          rememberInstruction(CI);
          return CI;
        }
  }

  // Create a new cast.
  Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
  rememberInstruction(I);
  return I;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
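/// For example, on a target where pointers are 64 bits wide, converting an
/// i8* value to i64 is a PtrToInt noop cast; ReuseOrCreateCast then reuses
/// an existing ptrtoint of the same value when one is suitably placed,
/// rather than emitting another.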
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
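      // (Otherwise the mere presence of debug intrinsics could change which
      // nearby binop is found within the limit, making the generated code
      // differ between builds with and without debug info.)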
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(SaveInsertPt->getDebugLoc());
  rememberInstruction(BO);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const TargetData *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                           FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                          FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With TargetData, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
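      // For example, with Factor == 4, the Mul (8 * %x) is rewritten below
      // as (2 * %x) by sdiv'ing the leading constant operand in place.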
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                             FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without TargetData, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    // FIXME: can use A->getNoWrapFlags(FlagNW)
    S = SE.getAddRecExpr(Start, Step, A->getLoop(), SCEV::FlagAnyWrap);
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
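  // Note that the loop below may grow Ops while iterating: when an addrec's
  // start is itself an add, the add's operands are appended to Ops and the
  // loop bound e is bumped to cover them.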
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         // FIXME: A->getNoWrapFlags(FlagNW)
                                         SCEV::FlagAnyWrap));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
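    // For example, with an i32 element type (so ElSize == 4), an offset
    // expression of {8,+,4}<L> factors into the array index {2,+,1}<L>
    // with a zero remainder.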
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With TargetData, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without TargetData, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
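    // An i8* GEP is scaled in bytes, so the fully-expanded add expression
    // can be used directly as its single index.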
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    // Restore the original insert point.
    if (SaveInsertBB)
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later one.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return 0;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return 0;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = 0;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown that isn't an instruction, peek
        // through it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          // FIXME: A->getNoWrapFlags(FlagNW)
                                          SCEV::FlagAnyWrap));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
         OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV != PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return NULL;

  switch (IncV->getOpcode()) {
  default:
    return NULL;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT->dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return NULL;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT->dominates(OInst, InsertPos))
          return NULL;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return NULL;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return NULL;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT->dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (!SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT->dominates(IncV, InsertPos))
      break;
  }
  for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
       E = IVIncs.rend(); I != E; ++I) {
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for (Instruction *IVOper = IncV;
       (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                 /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
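  // A header phi qualifies for reuse only if its SCEV is exactly Normalized
  // and its latch increment is in a form the expander itself would produce.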
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()) ||
          (SE.getEffectiveSCEVType(PN->getType()) !=
           SE.getEffectiveSCEVType(Normalized->getType())) ||
          SE.getSCEV(PN) != Normalized)
        continue;

      Instruction *IncV =
        cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(IncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop)
          do {
            if (SE.DT->dominates(IncV, IVIncInsertPos))
              break;
            // Make sure the increment is where we want it. But don't move it
            // down past a potential existing post-inc user.
            IncV->moveBefore(IVIncInsertPos);
            IVIncInsertPos = IncV;
            IncV = cast<Instruction>(IncV->getOperand(0));
          } while (IncV != PN);
      }
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(PN);
      // Remember the increment.
      rememberInstruction(IncV);
      return PN;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that
  // PHI reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the
  // increment (unless it's a constant, because subtracts of constants are
  // canonicalized to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    PN->addIncoming(IncV, Pred);
  }

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
                                                  Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       // FIXME: Normalized->getNoWrapFlags(FlagNW)
                       SCEV::FlagAnyWrap));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
                                            Normalized->getLoop(),
                                            // FIXME: Normalized
                                            // ->getNoWrapFlags(FlagNW)
                                            SCEV::FlagAnyWrap));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
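    // That is, use the value of the addrec after this iteration's increment:
    // the incoming value from the loop latch rather than the phi itself.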
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result)
        && !SE.DT->dominates(cast<Instruction>(Result),
                             Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside the loop whose operand is replaced
      // during expansion with the value of the postinc user. Without
      // fundamentally changing the way postinc users are tracked, the only
      // remedy is inserting an extra IV increment. StepV might fold into
      // PostLoopOffset, but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      // Expand the step somewhere that dominates the loop header.
      BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
      BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
      Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
      // Restore the insertion point to the place where the caller has
      // determined dominates all uses.
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
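  // For example, with an i64 canonical IV, an i32 addrec is any-extended to
  // i64, expanded in that wider type, and the result truncated back to i32
  // below.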
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       // FIXME: S->getNoWrapFlags(FlagNW)
                                       SCEV::FlagAnyWrap));
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    // FIXME: can use S->getNoWrapFlags()
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L, SCEV::FlagAnyWrap);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (InsertPt != Builder.GetInsertPoint()
             && (isInsertedInstruction(InsertPt)
                 || isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      }
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (InsertPt != Builder.GetInsertPoint()
             && (isInsertedInstruction(InsertPt)
                 || isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      }
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  restoreInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}

void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
  Builder.SetInsertPoint(BB, I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return V;
}
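// The {0,+,1}<L> expansion produced above takes the usual canonical-IV
// shape: a phi in the loop header plus an increment on the backedge.
// Schematically (hypothetical block and value names):
//   header:
//     %indvar = phi iN [ 0, %preheader ], [ %indvar.next, %latch ]
//     ...
//     %indvar.next = add iN %indvar, 1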
/// Sort values by integer width for replaceCongruentIVs.
static bool width_descending(Value *lhs, Value *rhs) {
  // Put pointers at the back and make sure pointer < pointer = false.
  if (!lhs->getType()->isIntegerTy() || !rhs->getType()->isIntegerTy())
    return rhs->getType()->isIntegerTy() && !lhs->getType()->isIntegerTy();
  return rhs->getType()->getPrimitiveSizeInBits()
    < lhs->getType()->getPrimitiveSizeInBits();
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetLowering *TLI) {
  // Find integer phis in order of increasing width.
  SmallVector<PHINode*, 8> Phis;
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
    Phis.push_back(Phi);
  }
  if (TLI)
    std::sort(Phis.begin(), Phis.end(), width_descending);

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow. Map wide phis to their truncation
  // so narrow phis can reuse them.
  for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
       PEnd = Phis.end(); PIter != PEnd; ++PIter) {
    PHINode *Phi = *PIter;

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TLI
          && TLI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi has the same width but is more canonical, replace the
      // original with it. As part of the "more canonical" determination,
      // respect a prior decision to use an IV chain.
      if (OrigPhiRef->getType() == Phi->getType()
          && !(ChainedPhis.count(Phi)
               || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
          && (ChainedPhis.count(Phi)
              || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. It's worth eagerly cleaning up the
      // common case of a single IV increment so that DeleteDeadPHIs can remove
      // cycles that had postinc uses.
      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
                                                   IsomorphicInc->getType());
      if (OrigInc != IsomorphicInc
          && TruncExpr == SE.getSCEV(IsomorphicInc)
          && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
              || hoistIVInc(OrigInc, IsomorphicInc))) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
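        // For example (hypothetical IR): with OrigInc "%inc = add nsw i64
        // %iv, 1" and IsomorphicInc "%inc32 = add nsw i32 %iv32, 1", the
        // uses of %inc32 are redirected to "trunc i64 %inc to i32" below,
        // leaving %inc32 (and eventually %iv32) dead.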
        Value *NewInc = OrigInc;
        if (OrigInc->getType() != IsomorphicInc->getType()) {
          Instruction *IP = isa<PHINode>(OrigInc)
            ? (Instruction*)L->getHeader()->getFirstInsertionPt()
            : OrigInc->getNextNode();
          IRBuilder<> Builder(IP);
          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.
            CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
        }
        IsomorphicInc->replaceAllUsesWith(NewInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}
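// Illustration of the overall effect (hypothetical before/after): given two
// header phis that SCEV proves congruent,
//   %iv.a = phi i64 [ 0, %ph ], [ %iv.a.next, %latch ]
//   %iv.b = phi i64 [ 0, %ph ], [ %iv.b.next, %latch ]
// all uses of %iv.b are rewritten to %iv.a (through a trunc/bitcast if the
// widths differ) and %iv.b is queued on DeadInsts for the caller to delete.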