ScalarEvolutionExpander.cpp revision dce4a407a24b04eebc6a376f8e62b41aaa7b071f
//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users())
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT->dominates(Ret, BIP));

  rememberInstruction(Ret);
  return Ret;
}
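
// Example (illustrative): given a request for an i64 ptrtoint of %v at IP,
// an existing user of the form
//   %c = ptrtoint i8* %v to i64
// is returned directly when it already sits at IP (and IP != BIP); otherwise
// a fresh ptrtoint is created at IP, takes over %c's name and uses, and %c is
// left behind with an undef operand in case it is serving as an insert point.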

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}
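
// Example (illustrative): the short-circuits above fold round trips, so for
//   %p2 = inttoptr i64 %x to i8*
// a request to cast %p2 back to i64 returns %x directly instead of emitting
//   %y = ptrtoint i8* %p2 to i64
// (both sides must have the same bit width for the fold to be a noop).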

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  BuilderType::InsertPointGuard Guard(Builder);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  rememberInstruction(BO);

  return BO;
}
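
// Example (illustrative): if %n and %m are both invariant in the current
// loop, a request for (%n + %m) at a point inside the loop body is emitted
// in the preheader instead, e.g.
//   preheader:
//     %sum = add i64 %n, %m
// And when the same binop was already emitted among the six instructions
// preceding the insertion point, the ScanLimit scan (which skips dbg.value
// intrinsics) returns it rather than creating a duplicate.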

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const DataLayout *DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                             FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                            FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (DL) {
      // With DataLayout, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without DataLayout, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, DL) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Re-sort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}
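
// Worked example (illustrative): factoring the element size 4 out of the
// addrec {8,+,12} with FactorOutConstant yields {2,+,3} with a zero
// remainder, so {8,+,12} can serve as a GEP index over 4-byte elements.
// Likewise SplitAddRecs rewrites {a + b,+,c} as the operand list a, b,
// {0,+,c}, letting the start values fold into GEP indices independently of
// the recurrence.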

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = SE.DL
                   ? SE.DL->getIntPtrType(PTy)
                   : Type::getInt64Ty(PTy->getContext());

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of
            // operands we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.DL) {
        // With DataLayout, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.DL->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without DataLayout, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
                Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing
        // the generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BuilderType::InsertPointGuard Guard(Builder);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPoint SaveInsertPt = Builder.saveIP();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  Builder.restoreIP(SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}
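
// Example (illustrative): expanding (%base + 4*%i) where %base is i32*
// prefers the "pretty" form
//   %scevgep = getelementptr i32* %base, i64 %i
// while a sum in which no operand could be factored by the element size
// (AnyNonZeroIndices stays false) falls back to the byte-addressed form
//   %uglygep = getelementptr i8* %base8, i64 %offset
// which is still friendlier to alias analysis than
// ptrtoint+arithmetic+inttoptr.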

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant
/// for SCEV expansion. If they are nested, this is the most nested. If they
/// are neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the front; visitAddExpr relies on
    // pointer operands coming before the rest so it can form GEPs.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}
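
// Example (illustrative): for the operands {%p (pointer), %x (integer in an
// inner loop), %n (loop-invariant integer)}, the stable sort above yields
//   %p, %n, %x
// pointers first so GEP formation sees its base early, less relevant
// (outer) loops before more relevant (inner) ones, with the reversed
// iteration order keeping constants behind other operands of the same loop.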

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated
  // loops. Iterate in reverse so that constants are emitted last, all else
  // equal, and so that pointer operands are inserted first, which the code
  // below relies on to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants
  // and pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown and not an instruction, peek
        // through it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}
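
// Example (illustrative): for S = (%p + 4*%i + 16), the pointer %p is
// expanded first and the remaining operands are handed to expandAddToGEP,
// so the whole sum can come out as a single
//   %scevgep = getelementptr ...
// rather than ptrtoint/add/inttoptr. A non-constant negative operand -%n is
// emitted as a sub of the running sum instead of materializing the negation.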

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated
  // loops. Iterate in reverse so that constants are emitted last, all else
  // equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}
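
// Example (illustrative): a udiv by the constant 8 is emitted as
//   %t = lshr i64 %lhs, 3
// since 8 is a power of two (logBase2(8) == 3); only non-power-of-two
// divisors produce an actual udiv instruction.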

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
         OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV != PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of
/// the simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT->dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT->dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // Allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is
      // already handled, or some number of address-size elements (ugly).
      // Ugly geps have 2 operands. i1* is used by the expander to represent
      // an address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to
/// make it available to other uses in this loop. Recursively hoist any
/// operands, until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT->dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos)
      || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT->dominates(IncV, InsertPos))
      break;
  }
  for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
       E = IVIncs.rend(); I != E; ++I) {
    (*I)->moveBefore(InsertPos);
  }
  return true;
}
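
// Example (illustrative): if the expansion point sits above the increment,
//   %iv = phi i64 [ 0, %ph ], [ %iv.next, %latch ]
//   <InsertPos>
//   %iv.next = add i64 %iv, 1
// hoistIVInc walks the increment chain via getIVIncOperand and moves
// %iv.next (and any non-dominating operands along the chain) up before
// <InsertPos>, provided InsertPos itself dominates the old position.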

/// Determine if this cyclic phi is in a form that would have been generated
/// by LSR. We don't care if the phi was actually expanded in this pass, as
/// long as it is in a low-cost form, for example, no implied multiplication.
/// This should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for (Instruction *IVOper = IncV;
       (IVOper = getIVIncOperand(IVOper,
                                 L->getLoopPreheader()->getTerminator(),
                                 /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// \brief Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
static void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                           Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}

/// \brief Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncating it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}
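
// Worked example (illustrative): with an available phi {0,+,-1} and a
// requested SCEV {R,+,1}, we have R + (-{R,+,1}) == {0,+,-1} == Phi, so the
// request can be served by reusing the phi and emitting R - phi (InvertStep),
// possibly after truncating the phi to the requested width.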

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop || IVIncInsertPos) &&
         "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV = IVIncInsertLoop &&
      SE.DT->properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()))
        continue;

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      Instruction *TempIncV =
        cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
        AddRecPhiMatch = PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPointGuard Guard(Builder);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs.
  // Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that
  // PHI reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the
  // increment (unless it's a constant, because subtracts of constants are
  // canonicalized to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert
    // the instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (Normalized->getNoWrapFlags(SCEV::FlagNUW))
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (Normalized->getNoWrapFlags(SCEV::FlagNSW))
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the
  // caller can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}
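
// Example (illustrative): for the addrec {%start,+,%step} this produces the
// canonical two-instruction pattern
//   header:
//     %x.iv = phi i64 [ %start, %preheader ], [ %x.iv.next, %latch ]
//     ...
//   latch:
//     %x.iv.next = add i64 %x.iv, %step
// with the increment placed on each in-loop predecessor edge (or at
// IVIncInsertPos when expanding for IVIncInsertLoop).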

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, nullptr,
                                                  nullptr, Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(
                             Start, Step, Normalized->getLoop(),
                             Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // In some cases, we decide to reuse an existing phi node but need to
  // truncate it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
                                          TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the
    // PostIncLoop or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result)
        && !SE.DT->dominates(cast<Instruction>(Result),
                             Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this
      // use. IVUsers tries to prevent this case, so it is rare. However, it
      // can happen when an IVUser outside the loop is not dominated by the
      // latch block. Adjusting IVIncInsertPos before expansion begins cannot
      // handle all cases. Consider a phi outside the loop whose operand is
      // replaced during expansion with the value of the postinc user. Without
      // fundamentally changing the way postinc users are tracked, the only
      // remedy is inserting an extra IV increment. StepV might fold into
      // PostLoopOffset, but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        BuilderType::InsertPointGuard Guard(Builder);
        StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop.
  // Apply truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType()) {
      Result = Builder.CreateTrunc(Result, TruncTy);
      rememberInstruction(Result);
    }
    // Invert the result.
    if (InvertStep) {
      Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(),
                                               TruncTy),
                                 Result);
      rememberInstruction(Result);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}
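
// Example (illustrative): an addrec {0,+,%step} whose step is defined in a
// sibling loop does not dominate this loop's header, so it is expanded as
// the unit recurrence {0,+,1} (with ExpandTy forced to the integer type) and
// the non-dominating %step is re-applied afterwards as the PostLoopScale
// multiply; a non-dominating start is re-applied the same way, as a
// PostLoopOffset add or GEP.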

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = nullptr;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       S->getNoWrapFlags(SCEV::FlagNW)));
    BasicBlock::iterator NewInsertPt =
      std::next(BasicBlock::iterator(cast<Instruction>(V)));
    BuilderType::InsertPointGuard Guard(Builder);
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
                      NewInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
                                        S->getNoWrapFlags(SCEV::FlagNW));

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    SmallSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP))
        continue;

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine()) // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}
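
// Example (illustrative): in canonical mode an affine {0,+,%f} over loop L
// is emitted in terms of the canonical IV rather than as its own phi:
//   header:
//     %indvar = phi i64 [ 0, %preheader ], [ %indvar.next, %backedge ]
//     %t = mul i64 %indvar, %f
//   backedge:
//     %indvar.next = add i64 %indvar, 1
// so {0,+,1} itself expands to just %indvar.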
  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}
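// Note: an n-operand smax/umax is lowered (above and below) as a chain of
// icmp+select pairs, for example (illustrative IR; value names hypothetical):
//   %cmp  = icmp sgt i64 %a, %b
//   %smax = select i1 %cmp, i64 %a, i64 %b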
Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (InsertPt != Builder.GetInsertPoint()
             && (isInsertedInstruction(InsertPt)
                 || isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = std::next(BasicBlock::iterator(InsertPt));
      }
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >::iterator
    I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BuilderType::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non-postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
  return V;
}
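// Note on the hoisting performed by expand() above: when a loop-invariant
// expression such as (4 * %n) is requested at a point inside a loop body, the
// insertion point is moved to the preheader's terminator, so the multiply is
// materialized once rather than on every iteration; the InsertedExpressions
// map then lets later requests at the same point reuse it. (Illustrative
// example, not an exhaustive description.)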
void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BuilderType::InsertPointGuard Guard(Builder);
  PHINode *V = cast<PHINode>(expandCodeFor(H, nullptr,
                                           L->getHeader()->begin()));

  return V;
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetTransformInfo *TTI) {
  // Collect the phis in the loop header.
  SmallVector<PHINode*, 8> Phis;
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
    Phis.push_back(Phi);
  }
  if (TTI)
    std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
      // Sort integer phis from widest to narrowest. Put pointers at the front
      // (so Phis.back() is the narrowest integer phi, if any) and make sure
      // pointer < pointer = false.
      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
        return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
      return RHS->getType()->getPrimitiveSizeInBits() <
             LHS->getType()->getPrimitiveSizeInBits();
    });

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow, mapping wide phis to their truncation
  // so narrow phis can reuse them.
  for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
       PEnd = Phis.end(); PIter != PEnd; ++PIter) {
    PHINode *Phi = *PIter;

    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = Phi->hasConstantValue()) {
      Phi->replaceAllUsesWith(V);
      DeadInsts.push_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI
          && TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }
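    // Reaching here, SE.getSCEV(Phi) matched an already-seen phi (or a free
    // truncation of one), so Phi is congruent to OrigPhiRef and is a
    // candidate for replacement below.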
    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi has the same width but is more canonical, replace the
      // original with it. As part of the "more canonical" determination,
      // respect a prior decision to use an IV chain.
      if (OrigPhiRef->getType() == Phi->getType()
          && !(ChainedPhis.count(Phi)
               || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
          && (ChainedPhis.count(Phi)
              || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. It's worth eagerly cleaning up the
      // common case of a single IV increment so that DeleteDeadPHIs can remove
      // cycles that had postinc uses.
      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
                                                   IsomorphicInc->getType());
      if (OrigInc != IsomorphicInc
          && TruncExpr == SE.getSCEV(IsomorphicInc)
          && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
              || hoistIVInc(OrigInc, IsomorphicInc))) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        Value *NewInc = OrigInc;
        if (OrigInc->getType() != IsomorphicInc->getType()) {
          Instruction *IP = isa<PHINode>(OrigInc)
            ? (Instruction*)L->getHeader()->getFirstInsertionPt()
            : OrigInc->getNextNode();
          IRBuilder<> Builder(IP);
          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.CreateTruncOrBitCast(OrigInc,
                                                IsomorphicInc->getType(),
                                                IVName);
        }
        IsomorphicInc->replaceAllUsesWith(NewInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}
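// A typical call site for replaceCongruentIVs looks like the following sketch
// (illustrative; `Rewriter` and the surrounding pass are hypothetical):
//   SmallVector<WeakVH, 16> DeadInsts;
//   unsigned NumElim = Rewriter.replaceCongruentIVs(L, DT, DeadInsts, TTI);
//   // ...then delete the entries in DeadInsts that are still trivially
//   // dead, e.g. with RecursivelyDeleteTriviallyDeadInstructions.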
1815// 1816// We cannot generally expand recurrences unless the step dominates the loop 1817// header. The expander handles the special case of affine recurrences by 1818// scaling the recurrence outside the loop, but this technique isn't generally 1819// applicable. Expanding a nested recurrence outside a loop requires computing 1820// binomial coefficients. This could be done, but the recurrence has to be in a 1821// perfectly reduced form, which can't be guaranteed. 1822struct SCEVFindUnsafe { 1823 ScalarEvolution &SE; 1824 bool IsUnsafe; 1825 1826 SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {} 1827 1828 bool follow(const SCEV *S) { 1829 if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { 1830 const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS()); 1831 if (!SC || SC->getValue()->isZero()) { 1832 IsUnsafe = true; 1833 return false; 1834 } 1835 } 1836 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 1837 const SCEV *Step = AR->getStepRecurrence(SE); 1838 if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) { 1839 IsUnsafe = true; 1840 return false; 1841 } 1842 } 1843 return true; 1844 } 1845 bool isDone() const { return IsUnsafe; } 1846}; 1847} 1848 1849namespace llvm { 1850bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) { 1851 SCEVFindUnsafe Search(SE); 1852 visitAll(S, Search); 1853 return !Search.IsUnsafe; 1854} 1855} 1856