CodeGenPrepare.cpp revision 7579609bfe0d915b6c2d8dc094a132d793ec8855
//===- CodeGenPrepare.cpp - Prepare a function for code generation -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "codegenprepare"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");

static cl::opt<bool>
CriticalEdgeSplit("cgp-critical-edge-splitting",
                  cl::desc("Split critical edges during codegen prepare"),
                  cl::init(false), cl::Hidden);

namespace {
  class CodeGenPrepare : public FunctionPass {
    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;
    DominatorTree *DT;
    ProfileInfo *PFI;

    /// CurInstIterator - As we scan instructions optimizing them, this is the
    /// next instruction to optimize. Xforms that can invalidate this should
    /// update it.
    BasicBlock::iterator CurInstIterator;

    /// BackEdges - Keep a set of all the loop back edges.
    ///
    SmallSet<std::pair<const BasicBlock*, const BasicBlock*>, 8> BackEdges;

    // Keeps track of non-local addresses that have been sunk into a block.
    // This allows us to avoid inserting duplicate code for blocks with
    // multiple load/stores of the same address.
    DenseMap<Value*, Value*> SunkAddrs;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetLowering *tli = 0)
      : FunctionPass(ID), TLI(tli) {
        initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
      }
    bool runOnFunction(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addPreserved<DominatorTree>();
      AU.addPreserved<ProfileInfo>();
    }

    virtual void releaseMemory() {
      BackEdges.clear();
    }

  private:
    bool EliminateMostlyEmptyBlocks(Function &F);
    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void EliminateMostlyEmptyBlock(BasicBlock *BB);
    bool OptimizeBlock(BasicBlock &BB);
    bool OptimizeInst(Instruction *I);
    bool OptimizeMemoryInst(Instruction *I, Value *Addr, const Type *AccessTy,
                            DenseMap<Value*,Value*> &SunkAddrs);
    bool OptimizeInlineAsmInst(CallInst *CS);
    bool OptimizeCallInst(CallInst *CI);
    bool MoveExtToFormExtLoad(Instruction *I);
    bool OptimizeExtUses(Instruction *I);
    void findLoopBackEdges(const Function &F);
  };
}

char CodeGenPrepare::ID = 0;
INITIALIZE_PASS(CodeGenPrepare, "codegenprepare",
                "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
  return new CodeGenPrepare(TLI);
}

/// findLoopBackEdges - Do a DFS walk to find loop back edges.
///
void CodeGenPrepare::findLoopBackEdges(const Function &F) {
  SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
  FindFunctionBackedges(F, Edges);

  BackEdges.insert(Edges.begin(), Edges.end());
}


bool CodeGenPrepare::runOnFunction(Function &F) {
  bool EverMadeChange = false;

  DT = getAnalysisIfAvailable<DominatorTree>();
  PFI = getAnalysisIfAvailable<ProfileInfo>();
  // First pass, eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // Now find loop back edges, but only if they are being used to decide which
  // critical edges to split.
  if (CriticalEdgeSplit)
    findLoopBackEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      MadeChange |= OptimizeBlock(*BB);
    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  return EverMadeChange;
}

/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch. Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel. Start by eliminating these blocks so we can split them the way we
/// want them.
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
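    // Illustrative shape of a block this is looking for (hypothetical IR):
    //   bb:
    //     %v = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
    //     call void @llvm.dbg.value(...)   ; debug intrinsics are skipped
    //     br label %dest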
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (Value::const_use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      const Instruction *User = cast<Instruction>(*UI);
      if (User->getParent() != DestBB || !isa<PHINode>(User))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (User->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(User))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only PHIs and
/// an unconditional branch in it.
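/// For example (hypothetical IR), a block of the shape
///   bb:                ; preds = %p1, %p2
///     %v = phi i32 [ %a, %p1 ], [ %b, %p2 ]
///     br label %dest
/// is removed by folding %v into %dest's PHIs and retargeting %p1 and %p2
/// at %dest.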
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function. If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  if (DT) {
    BasicBlock *BBIDom = DT->getNode(BB)->getIDom()->getBlock();
    BasicBlock *DestBBIDom = DT->getNode(DestBB)->getIDom()->getBlock();
    BasicBlock *NewIDom = DT->findNearestCommonDominator(BBIDom, DestBBIDom);
    DT->changeImmediateDominator(DestBB, NewIDom);
    DT->eraseNode(BB);
  }
  if (PFI) {
    PFI->replaceAllUses(BB, DestBB);
    PFI->removeEdge(ProfileInfo::getEdge(BB, DestBB));
  }
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

/// FindReusablePredBB - Check all of the predecessors of the block DestPHI
/// lives in to see if there is a block that we can reuse as a critical edge
/// from TIBB.
static BasicBlock *FindReusablePredBB(PHINode *DestPHI, BasicBlock *TIBB) {
  BasicBlock *Dest = DestPHI->getParent();
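  // Illustrative (hypothetical) CFG: if %dest has a predecessor %empty that
  // contains only "br label %dest", and every PHI in %dest has the same
  // incoming value for %empty as for TIBB, then %empty can stand in for a
  // newly split TIBB->%dest edge.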
  /// TIPHIValues - This array is lazily computed to determine the values of
  /// PHIs in Dest that TI would provide.
  SmallVector<Value*, 32> TIPHIValues;

  /// TIBBEntryNo - This is a cache to speed up pred queries for TIBB.
  unsigned TIBBEntryNo = 0;

  // Check to see if Dest has any blocks that can be used as a split edge for
  // this terminator.
  for (unsigned pi = 0, e = DestPHI->getNumIncomingValues(); pi != e; ++pi) {
    BasicBlock *Pred = DestPHI->getIncomingBlock(pi);
    // To be usable, the pred has to end with an uncond branch to the dest.
    BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
    if (!PredBr || !PredBr->isUnconditional())
      continue;
    // Must be empty other than the branch and debug info.
    BasicBlock::iterator I = Pred->begin();
    while (isa<DbgInfoIntrinsic>(I))
      I++;
    if (&*I != PredBr)
      continue;
    // Cannot be the entry block; its label does not get emitted.
    if (Pred == &Dest->getParent()->getEntryBlock())
      continue;

    // Finally, since we know that Dest has phi nodes in it, we have to make
    // sure that jumping to Pred will have the same effect as going to Dest in
    // terms of PHI values.
    PHINode *PN;
    unsigned PHINo = 0;
    unsigned PredEntryNo = pi;

    bool FoundMatch = true;
    for (BasicBlock::iterator I = Dest->begin();
         (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
      if (PHINo == TIPHIValues.size()) {
        if (PN->getIncomingBlock(TIBBEntryNo) != TIBB)
          TIBBEntryNo = PN->getBasicBlockIndex(TIBB);
        TIPHIValues.push_back(PN->getIncomingValue(TIBBEntryNo));
      }

      // If the PHI entry doesn't work, we can't use this pred.
      if (PN->getIncomingBlock(PredEntryNo) != Pred)
        PredEntryNo = PN->getBasicBlockIndex(Pred);

      if (TIPHIValues[PHINo] != PN->getIncomingValue(PredEntryNo)) {
        FoundMatch = false;
        break;
      }
    }

    // If we found a workable predecessor, change TI to branch to Succ.
    if (FoundMatch)
      return Pred;
  }
  return 0;
}


/// SplitEdgeNicely - Split the critical edge from TI to its specified
/// successor if it will improve codegen. We only do this if the successor has
/// phi nodes (otherwise critical edges are ok). If there is already another
/// predecessor of the succ that is empty (and thus has no phi nodes), use it
/// instead of introducing a new block.
static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum,
                            SmallSet<std::pair<const BasicBlock*,
                                     const BasicBlock*>, 8> &BackEdges,
                            Pass *P) {
  BasicBlock *TIBB = TI->getParent();
  BasicBlock *Dest = TI->getSuccessor(SuccNum);
  assert(isa<PHINode>(Dest->begin()) &&
         "This should only be called if Dest has a PHI!");
  PHINode *DestPHI = cast<PHINode>(Dest->begin());

  // Do not split edges to EH landing pads.
  if (InvokeInst *Invoke = dyn_cast<InvokeInst>(TI))
    if (Invoke->getSuccessor(1) == Dest)
      return;

  // As a hack, never split backedges of loops. Even though the copy for any
  // PHIs inserted on the backedge would be dead for exits from the loop, we
  // assume that the cost of *splitting* the backedge would be too high.
  if (BackEdges.count(std::make_pair(TIBB, Dest)))
    return;

  if (BasicBlock *ReuseBB = FindReusablePredBB(DestPHI, TIBB)) {
    ProfileInfo *PFI = P->getAnalysisIfAvailable<ProfileInfo>();
    if (PFI)
      PFI->splitEdge(TIBB, Dest, ReuseBB);
    Dest->removePredecessor(TIBB);
    TI->setSuccessor(SuccNum, ReuseBB);
    return;
  }

  SplitCriticalEdge(TI, SuccNum, P, true);
}
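// For example (hypothetical CFG): a conditional branch in %tibb to %dest,
// where %dest has PHIs and another predecessor, makes %tibb->%dest critical.
// If no existing empty predecessor of %dest yields the same PHI values as
// %tibb would, a fresh block holding just "br label %dest" is inserted on
// the edge.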
/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI) {
  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // This is an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHIs this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(UI);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
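// For example (hypothetical IR), given a noop pointer cast used elsewhere:
//   entry:
//     %c = bitcast i32* %p to i8*
//     br label %next
//   next:
//     store i8 0, i8* %c
// a clone of the bitcast is placed in %next so no virtual register for %c
// has to be copied across the block boundary.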
/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced. This is
/// a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmp - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedCmp =
        CmpInst::Create(CI->getOpcode(),
                        CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}

namespace {
class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
protected:
  void replaceCall(Value *With) {
    CI->replaceAllUsesWith(With);
    CI->eraseFromParent();
  }
  bool isFoldable(unsigned SizeCIOp, unsigned, bool) const {
    if (ConstantInt *SizeCI =
          dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
      return SizeCI->isAllOnesValue();
    return false;
  }
};
} // end anonymous namespace

bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    const Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
    CI->replaceAllUsesWith(RetVal);
    CI->eraseFromParent();
    return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need TargetData from here on out.
  const TargetData *TD = TLI ? TLI->getTargetData() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls. This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize. Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD);
}
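// For example (hypothetical IR), an unanswerable llvm.objectsize call such as
//   %sz = call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
// is replaced above with -1 (or 0 when the i1 "min" argument is true), the
// intrinsic's defined "unknown size" answers, since nothing later in codegen
// can do better.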
//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

/// IsNonLocalValue - Return true if the specified value is defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// OptimizeMemoryInst - Load and Store Instructions often have
/// addressing modes that can do significant amounts of computation. As such,
/// instruction selection will try to get the load or store to do as much
/// computation as possible for the program. The problem is that isel can only
/// see within a single block. As such, we sink as much legal addressing mode
/// stuff into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands.
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        const Type *AccessTy,
                                        DenseMap<Value*,Value*> &SunkAddrs) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes. This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value*, 8> worklist;
  SmallPtrSet<Value*, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI nodes, and ensure that
  // the addressing modes obtained from the non-PHI roots of the graph
  // are equivalent.
  Value *Consensus = 0;
  unsigned NumUses = 0;
  SmallVector<Instruction*, 16> AddrModeInsts;
  ExtAddrMode AddrMode;
  while (!worklist.empty()) {
    Value *V = worklist.back();
    worklist.pop_back();

    // Break use-def graph loops.
    if (Visited.count(V)) {
      Consensus = 0;
      break;
    }

    Visited.insert(V);

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
        worklist.push_back(P->getIncomingValue(i));
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed.
    SmallVector<Instruction*, 16> NewAddrModeInsts;
    ExtAddrMode NewAddrMode =
      AddressingModeMatcher::Match(V, AccessTy, MemoryInst,
                                   NewAddrModeInsts, *TLI);

    // Ensure that the obtained addressing mode is equivalent to that obtained
    // for all other roots of the PHI traversal. Also, when choosing one
    // such root as representative, select the one with the most uses in order
    // to keep the cost modeling heuristics in AddressingModeMatcher
    // applicable.
    if (!Consensus || NewAddrMode == AddrMode) {
      if (V->getNumUses() > NumUses) {
        Consensus = V;
        NumUses = V->getNumUses();
        AddrMode = NewAddrMode;
        AddrModeInsts = NewAddrModeInsts;
      }
      continue;
    }

    Consensus = 0;
    break;
  }

  // If the addressing mode couldn't be determined, or if multiple different
  // ones were determined, bail out now.
  if (!Consensus) return false;

  // Check to see if any of the instructions subsumed by this addr mode are
  // non-local to I's BB.
  bool AnyNonLocal = false;
  for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
    if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
      AnyNonLocal = true;
      break;
    }
  }

  // If all the instructions matched are already in this BB, don't do anything.
  if (!AnyNonLocal) {
    DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
    return false;
  }
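  // Illustrative (hypothetical) example: for a load whose address is
  //   %a = getelementptr i8* %base, i64 %idx
  // computed in another block, the matched mode might be
  //   AddrMode = { BaseReg = %base, Scale = 1, ScaledReg = %idx }
  // and the code below rematerializes that sum next to the load.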
  // Insert this computation right after this user. Since our caller is
  // scanning from the top of the BB to the bottom, reuses of the expr are
  // guaranteed to happen later.
  BasicBlock::iterator InsertPt = MemoryInst;

  // Now that we've determined the addressing expression we want to use and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block. If so,
  // reuse the computation.
  Value *&SunkAddr = SunkAddrs[Addr];
  if (SunkAddr) {
    DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt);
  } else {
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
    const Type *IntPtrTy =
      TLI->getTargetData()->getIntPtrType(AccessTy->getContext());

    Value *Result = 0;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      if (V->getType() != IntPtrTy)
        V = CastInst::CreateIntegerCast(V, IntPtrTy, /*isSigned=*/true,
                                        "sunkaddr", InsertPt);
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = new TruncInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else {
        V = new SExtInst(V, IntPtrTy, "sunkaddr", InsertPt);
      }
      if (AddrMode.Scale != 1)
        V = BinaryOperator::CreateMul(V, ConstantInt::get(IntPtrTy,
                                                          AddrMode.Scale),
                                      "sunkaddr", InsertPt);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = new PtrToIntInst(AddrMode.BaseGV, IntPtrTy, "sunkaddr",
                                  InsertPt);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    if (Result == 0)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = new IntToPtrInst(Result, Addr->getType(), "sunkaddr",
                                  InsertPt);
  }
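  // At this point SunkAddr holds the rebuilt address. For example
  // (hypothetical), an AddrMode of { BaseReg = %p, BaseOffs = 16 } becomes
  //   %sunkaddr  = ptrtoint i32* %p to i64
  //   %sunkaddr1 = add i64 %sunkaddr, 16
  //   %sunkaddr2 = inttoptr i64 %sunkaddr1 to i32*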
  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);

  if (Repl->use_empty()) {
    RecursivelyDeleteTriviallyDeadInstructions(Repl);
    // This address is now available for reassignment, so erase the table
    // entry; we don't want to match some completely different instruction.
    SunkAddrs[Addr] = 0;
  }
  ++NumMemoryInsts;
  return true;
}

/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeMemoryInst to sink their address computing into the block when
/// possible / profitable.
bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  TargetLowering::AsmOperandInfoVector
    TargetConstraints = TLI->ParseConstraints(CS);
  unsigned ArgNo = 0;
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType(), SunkAddrs);
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}

/// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
/// basic block as the load, unless conditions are unfavorable. This allows
/// SelectionDAG to fold the extend into the load.
///
bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
  // Look for a load being extended.
  LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
  if (!LI) return false;

  // If they're already in the same block, there's nothing to do.
  if (LI->getParent() == I->getParent())
    return false;

  // If the load has other users and the truncate is not free, this probably
  // isn't worthwhile.
  if (!LI->hasOneUse() &&
      TLI && (TLI->isTypeLegal(TLI->getValueType(LI->getType())) ||
              !TLI->isTypeLegal(TLI->getValueType(I->getType()))) &&
      !TLI->isTruncateFree(I->getType(), LI->getType()))
    return false;

  // Check whether the target supports casts folded into loads.
  unsigned LType;
  if (isa<ZExtInst>(I))
    LType = ISD::ZEXTLOAD;
  else {
    assert(isa<SExtInst>(I) && "Unexpected ext type!");
    LType = ISD::SEXTLOAD;
  }
  if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
    return false;

  // Move the extend into the same block as the load, so that SelectionDAG
  // can fold it.
  I->removeFromParent();
  I->insertAfter(LI);
  ++NumExtsMoved;
  return true;
}

bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with the result of the extension.
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;
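  // For example (hypothetical IR): if both %x and %ext = zext i32 %x to i64
  // are live out of DefBB, uses of %x in other blocks are rewritten to
  //   %t = trunc i64 %ext to i32
  // so that only %ext remains live across blocks.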
  // Make sure none of the uses are PHI nodes.
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    TheUse = InsertedTrunc;
    ++NumExtUses;
    MadeChange = true;
  }

  return MadeChange;
}

bool CodeGenPrepare::OptimizeInst(Instruction *I) {
  bool MadeChange = false;

  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P)) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
    }
  } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
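    // For example (hypothetical IR), LSR may hoist
    //   %base = ptrtoint [100 x i32]* @arr to i64
    // out of a loop; sinking such a cast back into its user's block would
    // undo that placement, so leave casts of constants alone.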
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    bool Change = false;
    if (TLI) {
      Change = OptimizeNoopCopyExpression(CI, *TLI);
      MadeChange |= Change;
    }

    if (!Change && (isa<ZExtInst>(I) || isa<SExtInst>(I))) {
      MadeChange |= MoveExtToFormExtLoad(I);
      MadeChange |= OptimizeExtUses(I);
    }
  } else if (CmpInst *CI = dyn_cast<CmpInst>(I)) {
    MadeChange |= OptimizeCmpExpression(CI);
  } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (TLI)
      MadeChange |= OptimizeMemoryInst(I, I->getOperand(0), LI->getType(),
                                       SunkAddrs);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI)
      MadeChange |= OptimizeMemoryInst(I, SI->getOperand(1),
                                       SI->getOperand(0)->getType(),
                                       SunkAddrs);
  } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      /// The GEP operand must be a pointer, so must its result -> BitCast
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      MadeChange = true;
      OptimizeInst(NC);
    }
  } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
    MadeChange |= OptimizeCallInst(CI);
  }

  return MadeChange;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Split all critical edges where the dest block has a PHI.
  if (CriticalEdgeSplit) {
    TerminatorInst *BBTI = BB.getTerminator();
    if (BBTI->getNumSuccessors() > 1 && !isa<IndirectBrInst>(BBTI)) {
      for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i) {
        BasicBlock *SuccBB = BBTI->getSuccessor(i);
        if (isa<PHINode>(SuccBB->begin()) && isCriticalEdge(BBTI, i, true))
          SplitEdgeNicely(BBTI, i, BackEdges, this);
      }
    }
  }

  SunkAddrs.clear();

  CurInstIterator = BB.begin();
  for (BasicBlock::iterator E = BB.end(); CurInstIterator != E; ) {
    Instruction *I = CurInstIterator++;

    if (CallInst *CI = dyn_cast<CallInst>(I))
      MadeChange |= OptimizeCallInst(CI);
    else
      MadeChange |= OptimizeInst(I);
  }

  return MadeChange;
}