CodeGenPrepare.cpp revision d8d0b6a42c09f1c5b00a4e7029b08074a3da5acd
//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "codegenprepare"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static cl::opt<bool> FactorCommonPreds("split-critical-paths-tweak",
                                       cl::init(false), cl::Hidden);

namespace {
  class CodeGenPrepare : public FunctionPass {
    /// TLI - Keep a pointer of a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;
    ProfileInfo *PFI;

    /// BackEdges - Keep a set of all the loop back edges.
    ///
    SmallSet<std::pair<const BasicBlock*, const BasicBlock*>, 8> BackEdges;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetLowering *tli = 0)
      : FunctionPass(&ID), TLI(tli) {}
    bool runOnFunction(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addPreserved<ProfileInfo>();
    }

  private:
    bool EliminateMostlyEmptyBlocks(Function &F);
    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void EliminateMostlyEmptyBlock(BasicBlock *BB);
    bool OptimizeBlock(BasicBlock &BB);
    bool OptimizeMemoryInst(Instruction *I, Value *Addr, const Type *AccessTy,
                            DenseMap<Value*,Value*> &SunkAddrs);
    bool OptimizeInlineAsmInst(Instruction *I, CallSite CS,
                               DenseMap<Value*,Value*> &SunkAddrs);
    bool MoveExtToFormExtLoad(Instruction *I);
    bool OptimizeExtUses(Instruction *I);
    void findLoopBackEdges(const Function &F);
  };
}

char CodeGenPrepare::ID = 0;
static RegisterPass<CodeGenPrepare> X("codegenprepare",
                                      "Optimize for code generation");

FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
  return new CodeGenPrepare(TLI);
}
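// Illustrative usage sketch (hedged; not part of this revision): a backend
// would typically schedule this pass right before instruction selection via
// the legacy pass manager, roughly:
//
//   PM.add(createCodeGenPreparePass(TM.getTargetLowering()));
//
// where "PM" and "TM" are assumed names for the pass manager and target
// machine. Passing a null TargetLowering disables the target-dependent
// transformations below.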
/// findLoopBackEdges - Do a DFS walk to find loop back edges.
///
void CodeGenPrepare::findLoopBackEdges(const Function &F) {
  SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
  FindFunctionBackedges(F, Edges);

  BackEdges.insert(Edges.begin(), Edges.end());
}


bool CodeGenPrepare::runOnFunction(Function &F) {
  bool EverMadeChange = false;

  PFI = getAnalysisIfAvailable<ProfileInfo>();
  // First pass, eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // Now find loop back edges.
  findLoopBackEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      MadeChange |= OptimizeBlock(*BB);
    EverMadeChange |= MadeChange;
  }
  return EverMadeChange;
}

/// EliminateMostlyEmptyBlocks - Eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch.  Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel.  Start by eliminating these blocks so we can split edges the way we
/// want.
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}
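// Illustrative example (hand-written IR, hedged): a "mostly empty" block that
// the routine above removes looks like
//
//   bb:                              ; preds = %entry
//     %p = phi i32 [ %x, %entry ]
//     br label %dest
//
// The phi is folded into %dest's phis, and %bb's predecessors end up
// branching to %dest directly.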
/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor.  If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (Value::use_const_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      const Instruction *User = cast<Instruction>(*UI);
      if (User->getParent() != DestBB || !isa<PHINode>(User))
        return false;
      // If User is a PHINode inside DestBB, check its incoming values.  If an
      // incoming value is defined in BB but does not arrive on the edge from
      // BB, this is a complex condition (e.g. preheaders) we want to avoid
      // here.
      if (User->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(User))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block.  If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}
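// Illustrative sketch (hand-written IR, hedged) of the common-predecessor
// conflict rejected above: with %pred branching to both %bb and %dest,
//
//   bb:
//     %a = phi i32 [ 0, %pred ]
//     br label %dest
//   dest:
//     %d = phi i32 [ 1, %pred ], [ %a, %bb ]
//
// merging %bb into %dest would need %d to be both 0 and 1 on the edge from
// %pred, so CanMergeBlocks returns false.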
/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phis and
/// an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function.  If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  if (PFI) {
    PFI->replaceAllUses(BB, DestBB);
    PFI->removeEdge(ProfileInfo::getEdge(BB, DestBB));
  }
  BB->eraseFromParent();

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}
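// Illustrative sketch (hand-written IR, hedged) of the phi rewrite above:
// when %bb with predecessors %p1 and %p2 is merged into %dest, an incoming
// value that was itself a phi in %bb is expanded into one entry per edge:
//
//   before:  bb:    %m = phi i32 [ %m1, %p1 ], [ %m2, %p2 ]
//            dest:  %v = phi i32 [ %m, %bb ], ...
//   after:   dest:  %v = phi i32 [ %m1, %p1 ], [ %m2, %p2 ], ...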
/// SplitEdgeNicely - Split the critical edge from TI to its specified
/// successor if it will improve codegen.  We only do this if the successor has
/// phi nodes (otherwise critical edges are ok).  If there is already another
/// predecessor of the succ that is empty (and thus has no phi nodes), use it
/// instead of introducing a new block.
static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum,
                            SmallSet<std::pair<const BasicBlock*,
                                     const BasicBlock*>, 8> &BackEdges,
                            Pass *P) {
  BasicBlock *TIBB = TI->getParent();
  BasicBlock *Dest = TI->getSuccessor(SuccNum);
  assert(isa<PHINode>(Dest->begin()) &&
         "This should only be called if Dest has a PHI!");

  // Do not split edges to EH landing pads.
  if (InvokeInst *Invoke = dyn_cast<InvokeInst>(TI)) {
    if (Invoke->getSuccessor(1) == Dest)
      return;
  }

  // As a hack, never split backedges of loops.  Even though the copy for any
  // PHIs inserted on the backedge would be dead for exits from the loop, we
  // assume that the cost of *splitting* the backedge would be too high.
  if (BackEdges.count(std::make_pair(TIBB, Dest)))
    return;

  if (!FactorCommonPreds) {
    /// TIPHIValues - This array is lazily computed to determine the values of
    /// PHIs in Dest that TI would provide.
    SmallVector<Value*, 32> TIPHIValues;

    // Check to see if Dest has any blocks that can be used as a split edge for
    // this terminator.
    for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
      BasicBlock *Pred = *PI;
      // To be usable, the pred has to end with an uncond branch to the dest.
      BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
      if (!PredBr || !PredBr->isUnconditional())
        continue;
      // Must be empty other than the branch and debug info.
      BasicBlock::iterator I = Pred->begin();
      while (isa<DbgInfoIntrinsic>(I))
        I++;
      if (dyn_cast<Instruction>(I) != PredBr)
        continue;
      // Cannot be the entry block; its label does not get emitted.
      if (Pred == &(Dest->getParent()->getEntryBlock()))
        continue;

      // Finally, since we know that Dest has phi nodes in it, we have to make
      // sure that jumping to Pred will have the same effect as going to Dest
      // in terms of PHI values.
      PHINode *PN;
      unsigned PHINo = 0;
      bool FoundMatch = true;
      for (BasicBlock::iterator I = Dest->begin();
           (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
        if (PHINo == TIPHIValues.size())
          TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));

        // If the PHI entry doesn't work, we can't use this pred.
        if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
          FoundMatch = false;
          break;
        }
      }

      // If we found a workable predecessor, change TI to branch to Succ.
      if (FoundMatch) {
        ProfileInfo *PFI = P->getAnalysisIfAvailable<ProfileInfo>();
        if (PFI)
          PFI->splitEdge(TIBB, Dest, Pred);
        Dest->removePredecessor(TIBB);
        TI->setSuccessor(SuccNum, Pred);
        return;
      }
    }

    SplitCriticalEdge(TI, SuccNum, P, true);
    return;
  }

  PHINode *PN;
  SmallVector<Value*, 8> TIPHIValues;
  for (BasicBlock::iterator I = Dest->begin();
       (PN = dyn_cast<PHINode>(I)); ++I)
    TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));

  SmallVector<BasicBlock*, 8> IdenticalPreds;
  for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (BackEdges.count(std::make_pair(Pred, Dest)))
      continue;
    if (Pred == TIBB)
      IdenticalPreds.push_back(Pred);
    else {
      bool Identical = true;
      unsigned PHINo = 0;
      for (BasicBlock::iterator I = Dest->begin();
           (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo)
        if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
          Identical = false;
          break;
        }
      if (Identical)
        IdenticalPreds.push_back(Pred);
    }
  }

  assert(!IdenticalPreds.empty());
  SplitBlockPredecessors(Dest, &IdenticalPreds[0], IdenticalPreds.size(),
                         ".critedge", P);
}
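// Illustrative sketch (hand-written CFG, hedged) of the pred-reuse case
// above: given
//
//   tibb:  br i1 %c, label %dest, label %other   ; critical edge to %dest
//   pred:  br label %dest                        ; empty block
//
// if every phi in %dest sees the same value from %tibb and %pred, the edge
// from %tibb is simply redirected to %pred; otherwise a new ".critedge" block
// is created by SplitCriticalEdge.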
/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // Is this an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to.  This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) == TargetLowering::Promote)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) == TargetLowering::Promote)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(UI);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// OptimizeCmpExpression - Sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced.  This
/// is a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmp - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedCmp =
        CmpInst::Create(CI->getOpcode(),
                        CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}
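// Illustrative sketch (hand-written IR, hedged) of the sinking done above:
//
//   bb0:   %c = icmp eq i32 %x, 0
//          br label %bb1
//   bb1:   br i1 %c, ...
//
// The use in %bb1 is rewritten to a fresh "icmp eq i32 %x, 0" placed at the
// top of %bb1, so no i1 value has to stay live across the block boundary.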
//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

/// IsNonLocalValue - Return true if the specified value is defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// OptimizeMemoryInst - Load and Store Instructions often have
/// addressing modes that can do significant amounts of computation.  As such,
/// instruction selection will try to get the load or store to do as much
/// computation as possible for the program.  The problem is that isel can only
/// see within a single block.  As such, we sink as much legal addressing mode
/// stuff into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands.
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        const Type *AccessTy,
                                        DenseMap<Value*,Value*> &SunkAddrs) {
  // Figure out what addressing mode will be built up for this operation.
  SmallVector<Instruction*, 16> AddrModeInsts;
  ExtAddrMode AddrMode = AddressingModeMatcher::Match(Addr, AccessTy,MemoryInst,
                                                      AddrModeInsts, *TLI);

  // Check to see if any of the instructions subsumed by this addr mode are
  // non-local to I's BB.
  bool AnyNonLocal = false;
  for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
    if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
      AnyNonLocal = true;
      break;
    }
  }

  // If all the instructions matched are already in this BB, don't do anything.
  if (!AnyNonLocal) {
    DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
    return false;
  }

  // Insert this computation right after this user.  Since our caller is
  // scanning from the top of the BB to the bottom, reuses of the expression
  // are guaranteed to happen later.
  BasicBlock::iterator InsertPt = MemoryInst;

  // Now that we've determined the addressing expression we want to use, and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block.  If so,
  // reuse the computation.
  Value *&SunkAddr = SunkAddrs[Addr];
  if (SunkAddr) {
    DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt);
  } else {
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
    const Type *IntPtrTy =
          TLI->getTargetData()->getIntPtrType(AccessTy->getContext());

    Value *Result = 0;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (isa<PointerType>(V->getType()))
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      if (V->getType() != IntPtrTy)
        V = CastInst::CreateIntegerCast(V, IntPtrTy, /*isSigned=*/true,
                                        "sunkaddr", InsertPt);
      Result = V;
    }
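
    // Illustrative sketch (hedged; the value names below are invented, the
    // real temporaries are all named "sunkaddr"): for an addressing mode of
    // the form BaseReg + Scale*ScaledReg + BaseOffs, the code in this else
    // branch emits roughly
    //
    //   %r0 = ptrtoint i32* %base to i32
    //   %r1 = mul i32 %scaled, <Scale>
    //   %r2 = add i32 %r0, %r1
    //   %r3 = add i32 %r2, <BaseOffs>
    //   %a  = inttoptr i32 %r3 to i32*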

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (isa<PointerType>(V->getType())) {
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = new TruncInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else {
        V = new SExtInst(V, IntPtrTy, "sunkaddr", InsertPt);
      }
      if (AddrMode.Scale != 1)
        V = BinaryOperator::CreateMul(V, ConstantInt::get(IntPtrTy,
                                                          AddrMode.Scale),
                                      "sunkaddr", InsertPt);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = new PtrToIntInst(AddrMode.BaseGV, IntPtrTy, "sunkaddr",
                                  InsertPt);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    if (Result == 0)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = new IntToPtrInst(Result, Addr->getType(), "sunkaddr",InsertPt);
  }

  MemoryInst->replaceUsesOfWith(Addr, SunkAddr);

  if (Addr->use_empty())
    RecursivelyDeleteTriviallyDeadInstructions(Addr);
  return true;
}

/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeMemoryInst to sink their address computing into the block when
/// possible / profitable.
bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
                                           DenseMap<Value*,Value*> &SunkAddrs) {
  bool MadeChange = false;
  InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());

  // Do a prepass over the constraints, canonicalizing them, and building up
  // the ConstraintOperands list.
  std::vector<InlineAsm::ConstraintInfo>
    ConstraintInfos = IA->ParseConstraints();

  /// ConstraintOperands - Information about all of the constraints.
  std::vector<TargetLowering::AsmOperandInfo> ConstraintOperands;
  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
    ConstraintOperands.
      push_back(TargetLowering::AsmOperandInfo(ConstraintInfos[i]));
    TargetLowering::AsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.isIndirect)
        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue(),
                             OpInfo.ConstraintType == TargetLowering::C_Memory);

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = OpInfo.CallOperandVal;
      MadeChange |= OptimizeMemoryInst(I, OpVal, OpVal->getType(), SunkAddrs);
    }
  }

  return MadeChange;
}
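// Illustrative sketch (hand-written IR, hedged) of the transform below:
//
//   bb0:   %v = load i16* %p
//          ...
//   bb1:   %e = sext i16 %v to i32
//
// The sext is moved to sit directly after the load in %bb0, so that
// SelectionDAG can select a single extending load (ISD::SEXTLOAD).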
/// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
/// basic block as the load, unless conditions are unfavorable.  This allows
/// SelectionDAG to fold the extend into the load.
///
bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
  // Look for a load being extended.
  LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
  if (!LI) return false;

  // If they're already in the same block, there's nothing to do.
  if (LI->getParent() == I->getParent())
    return false;

  // If the load has other users and the truncate is not free, this probably
  // isn't worthwhile.
  if (!LI->hasOneUse() &&
      TLI && !TLI->isTruncateFree(I->getType(), LI->getType()))
    return false;

  // Check whether the target supports casts folded into loads.
  unsigned LType;
  if (isa<ZExtInst>(I))
    LType = ISD::ZEXTLOAD;
  else {
    assert(isa<SExtInst>(I) && "Unexpected ext type!");
    LType = ISD::SEXTLOAD;
  }
  if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
    return false;

  // Move the extend into the same block as the load, so that SelectionDAG
  // can fold it.
  I->removeFromParent();
  I->insertAfter(LI);
  return true;
}

bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If both the result of the {s|z}ext and its source are live out, rewrite
  // all other uses of the source with the result of the extension.
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block once.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
    }
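
    // Illustrative sketch (hedged): given "%e = zext i32 %s to i64" where
    // both %s and %e are live out of DefBB, a use of %s in another block is
    // rewritten to use "trunc i64 %e to i32" instead, so only %e stays live
    // across the block boundary.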
    // Replace a use of the {s|z}ext source with a use of the result.
    TheUse = InsertedTrunc;

    MadeChange = true;
  }

  return MadeChange;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Split all critical edges where the dest block has a PHI.
  TerminatorInst *BBTI = BB.getTerminator();
  if (BBTI->getNumSuccessors() > 1 && !isa<IndirectBrInst>(BBTI)) {
    for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i) {
      BasicBlock *SuccBB = BBTI->getSuccessor(i);
      if (isa<PHINode>(SuccBB->begin()) && isCriticalEdge(BBTI, i, true))
        SplitEdgeNicely(BBTI, i, BackEdges, this);
    }
  }

  // Keep track of non-local addresses that have been sunk into this block.
  // This allows us to avoid inserting duplicate code for blocks with multiple
  // load/stores of the same address.
  DenseMap<Value*, Value*> SunkAddrs;

  for (BasicBlock::iterator BBI = BB.begin(), E = BB.end(); BBI != E; ) {
    Instruction *I = BBI++;

    if (CastInst *CI = dyn_cast<CastInst>(I)) {
      // If the source of the cast is a constant, then this should have
      // already been constant folded.  The only reason NOT to constant fold
      // it is if something (e.g. LSR) was careful to place the constant
      // evaluation in a block other than the one that uses it (e.g. to hoist
      // the address of globals out of a loop).  If this is the case, we don't
      // want to forward-subst the cast.
      if (isa<Constant>(CI->getOperand(0)))
        continue;

      bool Change = false;
      if (TLI) {
        Change = OptimizeNoopCopyExpression(CI, *TLI);
        MadeChange |= Change;
      }

      if (!Change && (isa<ZExtInst>(I) || isa<SExtInst>(I))) {
        MadeChange |= MoveExtToFormExtLoad(I);
        MadeChange |= OptimizeExtUses(I);
      }
    } else if (CmpInst *CI = dyn_cast<CmpInst>(I)) {
      MadeChange |= OptimizeCmpExpression(CI);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      if (TLI)
        MadeChange |= OptimizeMemoryInst(I, I->getOperand(0), LI->getType(),
                                         SunkAddrs);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (TLI)
        MadeChange |= OptimizeMemoryInst(I, SI->getOperand(1),
                                         SI->getOperand(0)->getType(),
                                         SunkAddrs);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      if (GEPI->hasAllZeroIndices()) {
        // The GEP operand must be a pointer, and so must its result -> BitCast.
        Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                          GEPI->getName(), GEPI);
        GEPI->replaceAllUsesWith(NC);
        GEPI->eraseFromParent();
        MadeChange = true;
        BBI = NC;
      }
    } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
      // If we found an inline asm expression, and if the target knows how to
      // lower it to normal LLVM code, do so now.
      if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
        if (TLI->ExpandInlineAsm(CI)) {
          BBI = BB.begin();
          // Avoid processing instructions out of order, which could cause
          // reuse before a value is defined.
          SunkAddrs.clear();
        } else
          // Sink address computing for memory operands into the block.
          MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
      }
    }
  }

  return MadeChange;
}