CodeGenPrepare.cpp revision d13db2c59cc94162d6cf0a04187d408bfef6d4a7
//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "codegenprepare"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;
using namespace llvm::PatternMatch;

namespace {
  class CodeGenPrepare : public FunctionPass {
    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;
    ProfileInfo *PFI;

    /// BackEdges - Keep a set of all the loop back edges.
    ///
    SmallSet<std::pair<const BasicBlock*, const BasicBlock*>, 8> BackEdges;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetLowering *tli = 0)
      : FunctionPass(&ID), TLI(tli) {}
    bool runOnFunction(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addPreserved<ProfileInfo>();
    }

    virtual void releaseMemory() {
      BackEdges.clear();
    }

  private:
    bool EliminateMostlyEmptyBlocks(Function &F);
    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void EliminateMostlyEmptyBlock(BasicBlock *BB);
    bool OptimizeBlock(BasicBlock &BB);
    bool OptimizeMemoryInst(Instruction *I, Value *Addr, const Type *AccessTy,
                            DenseMap<Value*,Value*> &SunkAddrs);
    bool OptimizeInlineAsmInst(Instruction *I, CallSite CS,
                               DenseMap<Value*,Value*> &SunkAddrs);
    bool OptimizeCallInst(CallInst *CI);
    bool MoveExtToFormExtLoad(Instruction *I);
    bool OptimizeExtUses(Instruction *I);
    void findLoopBackEdges(const Function &F);
  };
}

char CodeGenPrepare::ID = 0;
INITIALIZE_PASS(CodeGenPrepare, "codegenprepare",
                "Optimize for code generation", false, false);

FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
  return new CodeGenPrepare(TLI);
}
/// findLoopBackEdges - Do a DFS walk to find loop back edges.
///
void CodeGenPrepare::findLoopBackEdges(const Function &F) {
  SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
  FindFunctionBackedges(F, Edges);

  BackEdges.insert(Edges.begin(), Edges.end());
}


bool CodeGenPrepare::runOnFunction(Function &F) {
  bool EverMadeChange = false;

  PFI = getAnalysisIfAvailable<ProfileInfo>();
  // First pass, eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // Now find loop back edges.
  findLoopBackEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      MadeChange |= OptimizeBlock(*BB);
    EverMadeChange |= MadeChange;
  }
  return EverMadeChange;
}

/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch. Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel. Start by eliminating these blocks so we can split them the way we
/// want them.
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}
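// As an illustrative sketch (example IR, not from the original source), a
// "mostly empty" block in the sense above looks like:
//
//   bb:                                     ; preds = %a, %b
//     %p = phi i32 [ 0, %a ], [ %v, %b ]
//     br label %dest
//
// Eliminating it forwards %a and %b directly to %dest, folding %p into
// %dest's PHI nodes, provided CanMergeBlocks (below) proves this is safe.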
/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor.  If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (Value::const_use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      const Instruction *User = cast<Instruction>(*UI);
      if (User->getParent() != DestBB || !isa<PHINode>(User))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (User->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(User))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block.  If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}

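// A sketch of the common-predecessor conflict checked above (example IR,
// hypothetical block names): if %pred branches to both %bb and %dest, and
//
//   bb:    %p = phi i32 [ 1, %pred ]
//          br label %dest
//   dest:  %q = phi i32 [ %p, %bb ], [ 2, %pred ]
//
// then after merging, %dest would need two different incoming values for
// %pred (1 via %p, and 2), so CanMergeBlocks returns false.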
/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi's and
/// an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function.  If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  if (PFI) {
    PFI->replaceAllUses(BB, DestBB);
    PFI->removeEdge(ProfileInfo::getEdge(BB, DestBB));
  }
  BB->eraseFromParent();

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}
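// A sketch of the PHI rewrite performed above (example IR, hypothetical
// names): merging %bb, whose PHI %p has inputs from %a and %b, into %dest
// turns
//
//   dest:  %q = phi i32 [ %p, %bb ], ...
//
// into
//
//   dest:  %q = phi i32 [ 0, %a ], [ %v, %b ], ...
//
// i.e. each incoming edge of %bb becomes an incoming edge of %dest.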
/// FindReusablePredBB - Check all of the predecessors of the block DestPHI
/// lives in to see if there is a block that we can reuse as a critical edge
/// from TIBB.
static BasicBlock *FindReusablePredBB(PHINode *DestPHI, BasicBlock *TIBB) {
  BasicBlock *Dest = DestPHI->getParent();

  /// TIPHIValues - This array is lazily computed to determine the values of
  /// PHIs in Dest that TI would provide.
  SmallVector<Value*, 32> TIPHIValues;

  /// TIBBEntryNo - This is a cache to speed up pred queries for TIBB.
  unsigned TIBBEntryNo = 0;

  // Check to see if Dest has any blocks that can be used as a split edge for
  // this terminator.
  for (unsigned pi = 0, e = DestPHI->getNumIncomingValues(); pi != e; ++pi) {
    BasicBlock *Pred = DestPHI->getIncomingBlock(pi);
    // To be usable, the pred has to end with an uncond branch to the dest.
    BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
    if (!PredBr || !PredBr->isUnconditional())
      continue;
    // Must be empty other than the branch and debug info.
    BasicBlock::iterator I = Pred->begin();
    while (isa<DbgInfoIntrinsic>(I))
      I++;
    if (&*I != PredBr)
      continue;
    // Cannot be the entry block; its label does not get emitted.
    if (Pred == &Dest->getParent()->getEntryBlock())
      continue;

    // Finally, since we know that Dest has phi nodes in it, we have to make
    // sure that jumping to Pred will have the same effect as going to Dest in
    // terms of PHI values.
    PHINode *PN;
    unsigned PHINo = 0;
    unsigned PredEntryNo = pi;

    bool FoundMatch = true;
    for (BasicBlock::iterator I = Dest->begin();
         (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
      if (PHINo == TIPHIValues.size()) {
        if (PN->getIncomingBlock(TIBBEntryNo) != TIBB)
          TIBBEntryNo = PN->getBasicBlockIndex(TIBB);
        TIPHIValues.push_back(PN->getIncomingValue(TIBBEntryNo));
      }

      // If the PHI entry doesn't work, we can't use this pred.
      if (PN->getIncomingBlock(PredEntryNo) != Pred)
        PredEntryNo = PN->getBasicBlockIndex(Pred);

      if (TIPHIValues[PHINo] != PN->getIncomingValue(PredEntryNo)) {
        FoundMatch = false;
        break;
      }
    }

    // If we found a workable predecessor, change TI to branch to Succ.
    if (FoundMatch)
      return Pred;
  }
  return 0;
}


/// SplitEdgeNicely - Split the critical edge from TI to its specified
/// successor if it will improve codegen.  We only do this if the successor has
/// phi nodes (otherwise critical edges are ok).  If there is already another
/// predecessor of the succ that is empty (and thus has no phi nodes), use it
/// instead of introducing a new block.
static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum,
                            SmallSet<std::pair<const BasicBlock*,
                                     const BasicBlock*>, 8> &BackEdges,
                            Pass *P) {
  BasicBlock *TIBB = TI->getParent();
  BasicBlock *Dest = TI->getSuccessor(SuccNum);
  assert(isa<PHINode>(Dest->begin()) &&
         "This should only be called if Dest has a PHI!");
  PHINode *DestPHI = cast<PHINode>(Dest->begin());

  // Do not split edges to EH landing pads.
  if (InvokeInst *Invoke = dyn_cast<InvokeInst>(TI))
    if (Invoke->getSuccessor(1) == Dest)
      return;

  // As a hack, never split backedges of loops.  Even though the copy for any
  // PHIs inserted on the backedge would be dead for exits from the loop, we
  // assume that the cost of *splitting* the backedge would be too high.
  if (BackEdges.count(std::make_pair(TIBB, Dest)))
    return;

  if (BasicBlock *ReuseBB = FindReusablePredBB(DestPHI, TIBB)) {
    ProfileInfo *PFI = P->getAnalysisIfAvailable<ProfileInfo>();
    if (PFI)
      PFI->splitEdge(TIBB, Dest, ReuseBB);
    Dest->removePredecessor(TIBB);
    TI->setSuccessor(SuccNum, ReuseBB);
    return;
  }

  SplitCriticalEdge(TI, SuccNum, P, true);
}
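// A sketch of edge splitting with reuse (example IR, hypothetical names):
// given a conditional branch in %tibb to %dest, where %dest has PHIs, the
// usual fix is a fresh block on the %tibb -> %dest edge.  But if some empty
// predecessor %e already ends in "br label %dest" and supplies the same PHI
// values that %tibb would, FindReusablePredBB lets us retarget %tibb's
// branch to %e instead of creating a new block.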
/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // This is an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to.  This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) == TargetLowering::Promote)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) == TargetLowering::Promote)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(UI);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
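// A sketch of the sinking performed above (example IR, hypothetical names):
// for a no-op cast used in a different block,
//
//   def:  %c = bitcast i8* %p to i32*
//   use:  %v = load i32* %c            ; in another block
//
// a fresh "%c1 = bitcast i8* %p to i32*" is inserted in the using block and
// %v is rewritten to load from %c1, so the cast no longer has to be live
// across the edge.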
/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced.  This
/// is a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmp - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedCmp =
        CmpInst::Create(CI->getOpcode(),
                        CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}

namespace {
class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
protected:
  void replaceCall(Value *With) {
    CI->replaceAllUsesWith(With);
    CI->eraseFromParent();
  }
  bool isFoldable(unsigned SizeCIOp, unsigned, bool) const {
    if (ConstantInt *SizeCI =
            dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
      return SizeCI->isAllOnesValue();
    return false;
  }
};
} // end anonymous namespace

bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    const Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
    CI->replaceAllUsesWith(RetVal);
    CI->eraseFromParent();
    return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need TargetData from here on out.
  const TargetData *TD = TLI ? TLI->getTargetData() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize.  Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD);
}
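// A sketch of the llvm.objectsize lowering above (example IR): with an
// unknown object, e.g.
//
//   %sz = call i32 @llvm.objectsize.i32(i8* %p, i1 false)
//
// the call is replaced by the conservative "don't know" answer: -1 when the
// second argument is false (maximum size requested), 0 when it is true
// (minimum size requested).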
//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

/// IsNonLocalValue - Return true if the specified values are defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// OptimizeMemoryInst - Load and Store Instructions often have
/// addressing modes that can do significant amounts of computation.  As such,
/// instruction selection will try to get the load or store to do as much
/// computation as possible for the program.  The problem is that isel can only
/// see within a single block.  As such, we sink as much legal addressing mode
/// stuff into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands.
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        const Type *AccessTy,
                                        DenseMap<Value*,Value*> &SunkAddrs) {
  // Figure out what addressing mode will be built up for this operation.
  SmallVector<Instruction*, 16> AddrModeInsts;
  ExtAddrMode AddrMode = AddressingModeMatcher::Match(Addr, AccessTy,MemoryInst,
                                                      AddrModeInsts, *TLI);

  // Check to see if any of the instructions subsumed by this addr mode are
  // non-local to I's BB.
  bool AnyNonLocal = false;
  for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
    if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
      AnyNonLocal = true;
      break;
    }
  }

  // If all the instructions matched are already in this BB, don't do anything.
  if (!AnyNonLocal) {
    DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
    return false;
  }

  // Insert this computation right after this user.  Since our caller is
  // scanning from the top of the BB to the bottom, reuses of the expr are
  // guaranteed to happen later.
  BasicBlock::iterator InsertPt = MemoryInst;

  // Now that we've determined the addressing expression we want to use, and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block.  If so,
  // reuse the computation.
  Value *&SunkAddr = SunkAddrs[Addr];
  if (SunkAddr) {
    DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt);
  } else {
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
    const Type *IntPtrTy =
          TLI->getTargetData()->getIntPtrType(AccessTy->getContext());

    Value *Result = 0;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      if (V->getType() != IntPtrTy)
        V = CastInst::CreateIntegerCast(V, IntPtrTy, /*isSigned=*/true,
                                        "sunkaddr", InsertPt);
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = new TruncInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else {
        V = new SExtInst(V, IntPtrTy, "sunkaddr", InsertPt);
      }
      if (AddrMode.Scale != 1)
        V = BinaryOperator::CreateMul(V, ConstantInt::get(IntPtrTy,
                                                          AddrMode.Scale),
                                      "sunkaddr", InsertPt);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = new PtrToIntInst(AddrMode.BaseGV, IntPtrTy, "sunkaddr",
                                  InsertPt);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    if (Result == 0)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = new IntToPtrInst(Result, Addr->getType(), "sunkaddr",InsertPt);
  }

  MemoryInst->replaceUsesOfWith(Addr, SunkAddr);

  if (Addr->use_empty()) {
    RecursivelyDeleteTriviallyDeadInstructions(Addr);
    // This address is now available for reassignment, so erase the table
    // entry; we don't want to match some completely different instruction.
    SunkAddrs[Addr] = 0;
  }
  return true;
}
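// A sketch of the address materialization above (example IR, hypothetical
// names, 64-bit pointers assumed): for AddrMode = BaseReg %p + 4*%i + 16,
// the sunk computation is roughly
//
//   %sunkaddr = ptrtoint i8* %p to i64
//   %sunkaddr1 = mul i64 %i, 4
//   %sunkaddr2 = add i64 %sunkaddr, %sunkaddr1
//   %sunkaddr3 = add i64 %sunkaddr2, 16
//   %sunkaddr4 = inttoptr i64 %sunkaddr3 to i32*
//
// inserted right before the load/store, where isel can fold it into the
// memory operand's addressing mode.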
/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeMemoryInst to sink their address computing into the block when
/// possible / profitable.
bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
                                           DenseMap<Value*,Value*> &SunkAddrs) {
  bool MadeChange = false;
  InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());

  // Do a prepass over the constraints, canonicalizing them, and building up
  // the ConstraintOperands list.
  std::vector<InlineAsm::ConstraintInfo>
    ConstraintInfos = IA->ParseConstraints();

  /// ConstraintOperands - Information about all of the constraints.
  std::vector<TargetLowering::AsmOperandInfo> ConstraintOperands;
  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
    ConstraintOperands.
      push_back(TargetLowering::AsmOperandInfo(ConstraintInfos[i]));
    TargetLowering::AsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.isIndirect)
        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = OpInfo.CallOperandVal;
      MadeChange |= OptimizeMemoryInst(I, OpVal, OpVal->getType(), SunkAddrs);
    }
  }

  return MadeChange;
}

/// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
/// basic block as the load, unless conditions are unfavorable. This allows
/// SelectionDAG to fold the extend into the load.
///
bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
  // Look for a load being extended.
  LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
  if (!LI) return false;

  // If they're already in the same block, there's nothing to do.
  if (LI->getParent() == I->getParent())
    return false;

  // If the load has other users and the truncate is not free, this probably
  // isn't worthwhile.
  if (!LI->hasOneUse() &&
      TLI && !TLI->isTruncateFree(I->getType(), LI->getType()))
    return false;

  // Check whether the target supports casts folded into loads.
  unsigned LType;
  if (isa<ZExtInst>(I))
    LType = ISD::ZEXTLOAD;
  else {
    assert(isa<SExtInst>(I) && "Unexpected ext type!");
    LType = ISD::SEXTLOAD;
  }
  if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
    return false;

  // Move the extend into the same block as the load, so that SelectionDAG
  // can fold it.
  I->removeFromParent();
  I->insertAfter(LI);
  return true;
}
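// A sketch of the motivation above (example IR, hypothetical names): given
//
//   bb1:  %x = load i16* %p
//   ...
//   bb2:  %y = sext i16 %x to i32
//
// moving the sext into bb1, directly after the load, lets SelectionDAG
// select a single sign-extending load (SEXTLOAD) instead of a load plus a
// separate extend.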
bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If both the result of the {s|z}xt and its source are live out, rewrite all
  // other uses of the source with the result of the extension.
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block once.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    TheUse = InsertedTrunc;

    MadeChange = true;
  }

  return MadeChange;
}
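// A sketch of the rewrite above (example IR, hypothetical names): with
//
//   bb1:  %x = ...
//         %e = zext i32 %x to i64
//   bb2:  use of %x                ; %x and %e both live out of bb1
//
// the out-of-block use of %x is rewritten to "trunc i64 %e to i32" inserted
// in bb2, so only %e (not both values) stays live across the edge; this is
// only done where the target reports the truncate is free.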
// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Split all critical edges where the dest block has a PHI.
  TerminatorInst *BBTI = BB.getTerminator();
  if (BBTI->getNumSuccessors() > 1 && !isa<IndirectBrInst>(BBTI)) {
    for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i) {
      BasicBlock *SuccBB = BBTI->getSuccessor(i);
      if (isa<PHINode>(SuccBB->begin()) && isCriticalEdge(BBTI, i, true))
        SplitEdgeNicely(BBTI, i, BackEdges, this);
    }
  }

  // Keep track of non-local addresses that have been sunk into this block.
  // This allows us to avoid inserting duplicate code for blocks with multiple
  // load/stores of the same address.
  DenseMap<Value*, Value*> SunkAddrs;

  for (BasicBlock::iterator BBI = BB.begin(), E = BB.end(); BBI != E; ) {
    Instruction *I = BBI++;

    if (CastInst *CI = dyn_cast<CastInst>(I)) {
      // If the source of the cast is a constant, then this should have
      // already been constant folded.  The only reason NOT to constant fold
      // it is if something (e.g. LSR) was careful to place the constant
      // evaluation in a block other than the one that uses it (e.g. to hoist
      // the address of globals out of a loop).  If this is the case, we don't
      // want to forward-subst the cast.
      if (isa<Constant>(CI->getOperand(0)))
        continue;

      bool Change = false;
      if (TLI) {
        Change = OptimizeNoopCopyExpression(CI, *TLI);
        MadeChange |= Change;
      }

      if (!Change && (isa<ZExtInst>(I) || isa<SExtInst>(I))) {
        MadeChange |= MoveExtToFormExtLoad(I);
        MadeChange |= OptimizeExtUses(I);
      }
    } else if (CmpInst *CI = dyn_cast<CmpInst>(I)) {
      MadeChange |= OptimizeCmpExpression(CI);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      if (TLI)
        MadeChange |= OptimizeMemoryInst(I, I->getOperand(0), LI->getType(),
                                         SunkAddrs);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (TLI)
        MadeChange |= OptimizeMemoryInst(I, SI->getOperand(1),
                                         SI->getOperand(0)->getType(),
                                         SunkAddrs);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      if (GEPI->hasAllZeroIndices()) {
        /// The GEP operand must be a pointer, so must its result -> BitCast
        Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                          GEPI->getName(), GEPI);
        GEPI->replaceAllUsesWith(NC);
        GEPI->eraseFromParent();
        MadeChange = true;
        BBI = NC;
      }
    } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
      // If we found an inline asm expression, and if the target knows how to
      // lower it to normal LLVM code, do so now.
      if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
        if (TLI->ExpandInlineAsm(CI)) {
          BBI = BB.begin();
          // Avoid processing instructions out of order, which could cause
          // reuse before a value is defined.
          SunkAddrs.clear();
        } else
          // Sink address computing for memory operands into the block.
          MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
      } else {
        // Other CallInst optimizations that don't need to muck with the
        // enclosing iterator here.
        MadeChange |= OptimizeCallInst(CI);
      }
    }
  }

  return MadeChange;
}
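// Typical use (a sketch; the exact hook lives in the target-independent
// codegen driver, not in this file): the pass is scheduled before
// instruction selection, roughly as
//
//   PM.add(createCodeGenPreparePass(TM.getTargetLowering()));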