CodeGenPrepare.cpp revision 164b86b4399559e45fab7846f1e3e09119cab4e2
//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "codegenprepare"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/ValueHandle.h"
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");

static cl::opt<bool> DisableBranchOpts(
  "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
  cl::desc("Disable branch optimizations in CodeGenPrepare"));

namespace {
  class CodeGenPrepare : public FunctionPass {
    /// TLI - Keep a pointer of a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;
    const TargetLibraryInfo *TLInfo;
    DominatorTree *DT;
    ProfileInfo *PFI;

    /// CurInstIterator - As we scan instructions optimizing them, this is the
    /// next instruction to optimize. Xforms that can invalidate this should
    /// update it.
    BasicBlock::iterator CurInstIterator;

    /// Keeps track of non-local addresses that have been sunk into a block.
    /// This allows us to avoid inserting duplicate code for blocks with
    /// multiple load/stores of the same address.
    DenseMap<Value*, Value*> SunkAddrs;

    /// ModifiedDT - If CFG is modified in any way, dominator tree may need to
    /// be updated.
    bool ModifiedDT;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetLowering *tli = 0)
      : FunctionPass(ID), TLI(tli) {
        initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
      }
    bool runOnFunction(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addPreserved<DominatorTree>();
      AU.addPreserved<ProfileInfo>();
      AU.addRequired<TargetLibraryInfo>();
    }

  private:
    bool EliminateMostlyEmptyBlocks(Function &F);
    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void EliminateMostlyEmptyBlock(BasicBlock *BB);
    bool OptimizeBlock(BasicBlock &BB);
    bool OptimizeInst(Instruction *I);
    bool OptimizeMemoryInst(Instruction *I, Value *Addr, Type *AccessTy);
    bool OptimizeInlineAsmInst(CallInst *CS);
    bool OptimizeCallInst(CallInst *CI);
    bool MoveExtToFormExtLoad(Instruction *I);
    bool OptimizeExtUses(Instruction *I);
    bool DupRetToEnableTailCallOpts(ReturnInst *RI);
    bool PlaceDbgValues(Function &F);
  };
}

char CodeGenPrepare::ID = 0;
INITIALIZE_PASS_BEGIN(CodeGenPrepare, "codegenprepare",
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(CodeGenPrepare, "codegenprepare",
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
  return new CodeGenPrepare(TLI);
}

bool CodeGenPrepare::runOnFunction(Function &F) {
  bool EverMadeChange = false;

  ModifiedDT = false;
  TLInfo = &getAnalysis<TargetLibraryInfo>();
  DT = getAnalysisIfAvailable<DominatorTree>();
  PFI = getAnalysisIfAvailable<ProfileInfo>();

  // First pass, eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then iSel may not be able
  // to handle it properly. iSel will drop llvm.dbg.value if it cannot
  // find a node corresponding to the value.
  EverMadeChange |= PlaceDbgValues(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
      BasicBlock *BB = I++;
      MadeChange |= OptimizeBlock(*BB);
    }
    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      MadeChange |= ConstantFoldTerminator(BB, true);

    if (MadeChange)
      ModifiedDT = true;
    EverMadeChange |= MadeChange;
  }

  if (ModifiedDT && DT)
    DT->DT->recalculate(F);

  return EverMadeChange;
}

/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch. Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel. Start by eliminating these blocks so we can split them the way we
/// want them.
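///
/// For example (illustrative), a block such as
///   bb:                                   ; preds = %x, %y
///     %p = phi i32 [ %a, %x ], [ %b, %y ]
///     br label %succ
/// is folded into %succ when %p is used only by PHI nodes in %succ, rewiring
/// the incoming values %a and %b directly into %succ's PHIs.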
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (Value::const_use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      const Instruction *User = cast<Instruction>(*UI);
      if (User->getParent() != DestBB || !isa<PHINode>(User))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (User->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(User))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi's and
/// an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function. If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  if (DT && !ModifiedDT) {
    BasicBlock *BBIDom = DT->getNode(BB)->getIDom()->getBlock();
    BasicBlock *DestBBIDom = DT->getNode(DestBB)->getIDom()->getBlock();
    BasicBlock *NewIDom = DT->findNearestCommonDominator(BBIDom, DestBBIDom);
    DT->changeImmediateDominator(DestBB, NewIDom);
    DT->eraseNode(BB);
  }
  if (PFI) {
    PFI->replaceAllUses(BB, DestBB);
    PFI->removeEdge(ProfileInfo::getEdge(BB, DestBB));
  }
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
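/// For example (illustrative), with
///   entry:
///     %c = bitcast i8* %p to i32*
///     br i1 %cond, label %a, label %b
/// a use of %c in block %a gets its own copy of the bitcast inserted at the
/// top of %a, so the cast result no longer has to live in a register across
/// the branch.
///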
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // Is this an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(UI);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced. This is
/// a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
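///
/// For example (illustrative), a comparison
///   %cmp = icmp ult i32 %i, %n
/// defined in one block and branched on in several others is re-created in
/// each using block, letting codegen fold the compare into the branch there
/// instead of keeping a materialized i1 value live across blocks.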
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmp - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCmp =
        CmpInst::Create(CI->getOpcode(),
                        CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}

namespace {
class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
protected:
  void replaceCall(Value *With) {
    CI->replaceAllUsesWith(With);
    CI->eraseFromParent();
  }
  bool isFoldable(unsigned SizeCIOp, unsigned, bool) const {
    if (ConstantInt *SizeCI =
                             dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
      return SizeCI->isAllOnesValue();
    return false;
  }
};
} // end anonymous namespace
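
// The helper above folds a fortified call whose object-size argument is
// all-ones ("size unknown") into the plain libc call, e.g. (illustrative):
//   __memcpy_chk(dst, src, n, (size_t)-1)  -->  memcpy(dst, src, n)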

bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

    // Substituting this can cause recursive simplifications, which can
    // invalidate our iterator. Use a WeakVH to hold onto it in case this
    // happens.
    WeakVH IterHandle(CurInstIterator);

    ReplaceAndSimplifyAllUses(CI, RetVal, TLI ? TLI->getTargetData() : 0,
                              TLInfo, ModifiedDT ? 0 : DT);

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurInstIterator) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
    return true;
  }
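
  // The code above folds e.g. (illustrative)
  //   %sz = call i32 @llvm.objectsize.i32(i8* %p, i1 false)
  // to i32 -1, and the i1-true ("minimum") form to i32 0, since the real
  // object size is unknown at this point.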

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need TargetData from here on out.
  const TargetData *TD = TLI ? TLI->getTargetData() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls. This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize. Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD);
}

/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
///
/// =>
///
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
///
bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
  if (!TLI)
    return false;

  Value *V = RI->getReturnValue();
  PHINode *PN = V ? dyn_cast<PHINode>(V) : NULL;
  if (V && !PN)
    return false;

  BasicBlock *BB = RI->getParent();
  if (PN && PN->getParent() != BB)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  // See llvm::isInTailCallPosition().
  const Function *F = BB->getParent();
  Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Make sure there are no instructions between the PHI and return, or that
  // the return is the first instruction in the block.
  if (PN) {
    BasicBlock::iterator BI = BB->begin();
    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
    if (&*BI != RI)
      return false;
  } else {
    BasicBlock::iterator BI = BB->begin();
    while (isa<DbgInfoIntrinsic>(BI)) ++BI;
    if (&*BI != RI)
      return false;
  }

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  /// call.
  SmallVector<CallInst*, 4> TailCalls;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI))
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
    Attributes CalleeRetAttr = CS.getAttributes().getRetAttributes();
    if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

/// IsNonLocalValue - Return true if the specified value is defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// OptimizeMemoryInst - Load and Store Instructions often have
/// addressing modes that can do significant amounts of computation. As such,
/// instruction selection will try to get the load or store to do as much
/// computation as possible for the program. The problem is that isel can only
/// see within a single block. As such, we sink as much legal addressing mode
/// stuff into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands.
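///
/// For example (illustrative), if
///   %addr = getelementptr i32* %base, i64 %idx
/// is defined in one block and a load in another block uses it, the address
/// computation is re-emitted next to the load, so isel can match it as a
/// reg+reg addressing mode instead of receiving a fully computed pointer from
/// another block.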
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes. This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value*, 8> worklist;
  SmallPtrSet<Value*, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI nodes, and ensure that
  // the addressing modes obtained from the non-PHI roots of the graph
  // are equivalent.
  Value *Consensus = 0;
  unsigned NumUsesConsensus = 0;
  bool IsNumUsesConsensusValid = false;
  SmallVector<Instruction*, 16> AddrModeInsts;
  ExtAddrMode AddrMode;
  while (!worklist.empty()) {
    Value *V = worklist.back();
    worklist.pop_back();

    // Break use-def graph loops.
    if (!Visited.insert(V)) {
      Consensus = 0;
      break;
    }

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
        worklist.push_back(P->getIncomingValue(i));
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed.
    SmallVector<Instruction*, 16> NewAddrModeInsts;
    ExtAddrMode NewAddrMode =
      AddressingModeMatcher::Match(V, AccessTy, MemoryInst,
                                   NewAddrModeInsts, *TLI);

    // This check is broken into two cases with very similar code to avoid
    // using getNumUses() as much as possible. Some values have a lot of uses,
    // so calling getNumUses() unconditionally caused a significant
    // compile-time regression.
    if (!Consensus) {
      Consensus = V;
      AddrMode = NewAddrMode;
      AddrModeInsts = NewAddrModeInsts;
      continue;
    } else if (NewAddrMode == AddrMode) {
      if (!IsNumUsesConsensusValid) {
        NumUsesConsensus = Consensus->getNumUses();
        IsNumUsesConsensusValid = true;
      }

      // Ensure that the obtained addressing mode is equivalent to that
      // obtained for all other roots of the PHI traversal. Also, when choosing
      // one such root as representative, select the one with the most uses in
      // order to keep the cost modeling heuristics in AddressingModeMatcher
      // applicable.
      unsigned NumUses = V->getNumUses();
      if (NumUses > NumUsesConsensus) {
        Consensus = V;
        NumUsesConsensus = NumUses;
        AddrModeInsts = NewAddrModeInsts;
      }
      continue;
    }

    Consensus = 0;
    break;
  }

  // If the addressing mode couldn't be determined, or if multiple different
  // ones were determined, bail out now.
  if (!Consensus) return false;

  // Check to see if any of the instructions subsumed by this addr mode are
  // non-local to I's BB.
  bool AnyNonLocal = false;
  for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
    if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
      AnyNonLocal = true;
      break;
    }
  }

  // If all the instructions matched are already in this BB, don't do anything.
  if (!AnyNonLocal) {
    DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
    return false;
  }

  // Insert this computation right after this user. Since our caller is
  // scanning from the top of the BB to the bottom, reuse of the expr is
  // guaranteed to happen later.
  IRBuilder<> Builder(MemoryInst);

  // Now that we've determined the addressing expression we want to use and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block. If so,
  // reuse the computation.
  Value *&SunkAddr = SunkAddrs[Addr];
  if (SunkAddr) {
    DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
  } else {
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
    Type *IntPtrTy =
          TLI->getTargetData()->getIntPtrType(AccessTy->getContext());

    Value *Result = 0;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      if (V->getType() != IntPtrTy)
        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
      } else {
        V = Builder.CreateSExt(V, IntPtrTy, "sunkaddr");
      }
      if (AddrMode.Scale != 1)
        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                              "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    if (Result == 0)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
  }
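
  // At this point the emitted "sunkaddr" sequence computes, as integer
  // arithmetic,
  //   BaseReg + ScaledReg*Scale + BaseGV + BaseOffs
  // and casts the sum back to the pointer type of Addr.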

  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Repl->use_empty()) {
    // This can cause recursive deletion, which can invalidate our iterator.
    // Use a WeakVH to hold onto it in case this happens.
    WeakVH IterHandle(CurInstIterator);
    BasicBlock *BB = CurInstIterator->getParent();

    RecursivelyDeleteTriviallyDeadInstructions(Repl);

    if (IterHandle != CurInstIterator) {
      // If the iterator instruction was recursively deleted, start over at the
      // start of the block.
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    } else {
      // This address is now available for reassignment, so erase the table
      // entry; we don't want to match some completely different instruction.
      SunkAddrs[Addr] = 0;
    }
  }
  ++NumMemoryInsts;
  return true;
}

/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeMemoryInst to sink their address computations into the block when
/// possible / profitable.
bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  TargetLowering::AsmOperandInfoVector
    TargetConstraints = TLI->ParseConstraints(CS);
  unsigned ArgNo = 0;
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType());
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}

/// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
/// basic block as the load, unless conditions are unfavorable. This allows
/// SelectionDAG to fold the extend into the load.
///
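/// For example (illustrative), given
///   bb1:
///     %x = load i16* %p
///     br label %bb2
///   bb2:
///     %e = zext i16 %x to i32
/// the zext is moved into %bb1 right after the load, so isel can select a
/// single extending load (e.g. movzwl on x86).
///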
bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
  // Look for a load being extended.
  LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
  if (!LI) return false;

  // If they're already in the same block, there's nothing to do.
  if (LI->getParent() == I->getParent())
    return false;

  // If the load has other users and the truncate is not free, this probably
  // isn't worthwhile.
  if (!LI->hasOneUse() &&
      TLI && (TLI->isTypeLegal(TLI->getValueType(LI->getType())) ||
              !TLI->isTypeLegal(TLI->getValueType(I->getType()))) &&
      !TLI->isTruncateFree(I->getType(), LI->getType()))
    return false;

  // Check whether the target supports casts folded into loads.
  unsigned LType;
  if (isa<ZExtInst>(I))
    LType = ISD::ZEXTLOAD;
  else {
    assert(isa<SExtInst>(I) && "Unexpected ext type!");
    LType = ISD::SEXTLOAD;
  }
  if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
    return false;

  // Move the extend into the same block as the load, so that SelectionDAG
  // can fold it.
  I->removeFromParent();
  I->insertAfter(LI);
  ++NumExtsMoved;
  return true;
}
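
/// OptimizeExtUses - If both a {s|z}ext and its source value are live out of
/// the defining block, rewrite the source's uses in other blocks as truncates
/// of the extended value, so that only one value is live across the block
/// boundary. E.g. (illustrative), when %e = zext i16 %x to i32 and %x are
/// both live out, a use of %x in another block becomes "trunc i32 %e to i16"
/// there; this is only done when the target reports the truncate as free.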
bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with result of extension.
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block once.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    TheUse = InsertedTrunc;
    ++NumExtUses;
    MadeChange = true;
  }

  return MadeChange;
}

bool CodeGenPrepare::OptimizeInst(Instruction *I) {
  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P)) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (TLI && OptimizeNoopCopyExpression(CI, *TLI))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      bool MadeChange = MoveExtToFormExtLoad(I);
      return MadeChange | OptimizeExtUses(I);
    }
    return false;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return OptimizeCmpExpression(CI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (TLI)
      return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI)
      return OptimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType());
    return false;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      /// The GEP operand must be a pointer, so must its result -> BitCast
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      OptimizeInst(NC);
      return true;
    }
    return false;
  }

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return OptimizeCallInst(CI);

  if (ReturnInst *RI = dyn_cast<ReturnInst>(I))
    return DupRetToEnableTailCallOpts(RI);

  return false;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  for (BasicBlock::iterator E = BB.end(); CurInstIterator != E; )
    MadeChange |= OptimizeInst(CurInstIterator++);

  return MadeChange;
}

// If llvm.dbg.value is far away from the value, then iSel may not be able
// to handle it properly. iSel will drop llvm.dbg.value if it cannot
// find a node corresponding to the value.
bool CodeGenPrepare::PlaceDbgValues(Function &F) {
  bool MadeChange = false;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
    Instruction *PrevNonDbgInst = NULL;
    for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) {
      Instruction *Insn = BI; ++BI;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      if (!DVI) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}
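
// Usage (sketch): targets normally schedule this pass right before instruction
// selection; a standalone run over one function would look roughly like
//   FunctionPassManager FPM(&M);                                // Module &M
//   FPM.add(createCodeGenPreparePass(TM.getTargetLowering()));  // TargetMachine &TM
//   FPM.doInitialization();
//   FPM.run(F);                                                 // Function &F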