CodeGenPrepare.cpp revision 58256f83c86d85df24874db0db78b0bc972d6258
//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "codegenprepare"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static cl::opt<bool> FactorCommonPreds("split-critical-paths-tweak",
                                       cl::init(false), cl::Hidden);

namespace {
  class VISIBILITY_HIDDEN CodeGenPrepare : public FunctionPass {
    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;

    /// BackEdges - Keep a set of all the loop back edges.
    ///
    SmallSet<std::pair<BasicBlock*,BasicBlock*>, 8> BackEdges;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetLowering *tli = 0)
      : FunctionPass(&ID), TLI(tli) {}
    bool runOnFunction(Function &F);

  private:
    bool EliminateMostlyEmptyBlocks(Function &F);
    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void EliminateMostlyEmptyBlock(BasicBlock *BB);
    bool OptimizeBlock(BasicBlock &BB);
    bool OptimizeMemoryInst(Instruction *I, Value *Addr, const Type *AccessTy,
                            DenseMap<Value*,Value*> &SunkAddrs);
    bool OptimizeInlineAsmInst(Instruction *I, CallSite CS,
                               DenseMap<Value*,Value*> &SunkAddrs);
    bool OptimizeExtUses(Instruction *I);
    void findLoopBackEdges(Function &F);
  };
}

char CodeGenPrepare::ID = 0;
static RegisterPass<CodeGenPrepare> X("codegenprepare",
                                      "Optimize for code generation");

FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
  return new CodeGenPrepare(TLI);
}

/// findLoopBackEdges - Do a DFS walk to find loop back edges.
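/// A back edge is an edge whose destination is still on the DFS stack when
/// the edge is examined.  For example (illustrative IR, assumed here for
/// exposition only):
///
///   entry:
///     br label %loop
///   loop:
///     ...
///     br i1 %done, label %exit, label %loop
///
/// The DFS visits entry, then loop; when it scans loop's successors, loop
/// itself is still in InStack, so (loop, loop) is recorded as a back edge.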
///
void CodeGenPrepare::findLoopBackEdges(Function &F) {
  SmallPtrSet<BasicBlock*, 8> Visited;
  SmallVector<std::pair<BasicBlock*, succ_iterator>, 8> VisitStack;
  SmallPtrSet<BasicBlock*, 8> InStack;

  BasicBlock *BB = &F.getEntryBlock();
  if (succ_begin(BB) == succ_end(BB))
    return;
  Visited.insert(BB);
  VisitStack.push_back(std::make_pair(BB, succ_begin(BB)));
  InStack.insert(BB);
  do {
    std::pair<BasicBlock*, succ_iterator> &Top = VisitStack.back();
    BasicBlock *ParentBB = Top.first;
    succ_iterator &I = Top.second;

    bool FoundNew = false;
    while (I != succ_end(ParentBB)) {
      BB = *I++;
      if (Visited.insert(BB)) {
        FoundNew = true;
        break;
      }
      // Successor is in VisitStack, it's a back edge.
      if (InStack.count(BB))
        BackEdges.insert(std::make_pair(ParentBB, BB));
    }

    if (FoundNew) {
      // Go down one level if there is an unvisited successor.
      InStack.insert(BB);
      VisitStack.push_back(std::make_pair(BB, succ_begin(BB)));
    } else {
      // Go up one level.
      std::pair<BasicBlock*, succ_iterator> &Pop = VisitStack.back();
      InStack.erase(Pop.first);
      VisitStack.pop_back();
    }
  } while (!VisitStack.empty());
}


bool CodeGenPrepare::runOnFunction(Function &F) {
  bool EverMadeChange = false;

  // First pass, eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // Now find loop back edges.
  findLoopBackEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      MadeChange |= OptimizeBlock(*BB);
    EverMadeChange |= MadeChange;
  }
  return EverMadeChange;
}

/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes
/// and an unconditional branch.  Passes before isel (e.g. LSR/loopsimplify)
/// often split edges in ways that are non-optimal for isel.  Start by
/// eliminating these blocks so we can split them the way we want them.
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch isn't a phi node, then other stuff
    // is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      if (!isa<PHINode>(BBI)) continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor.  If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
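  //
  // For example (illustrative IR sketch, names assumed):
  //
  //   bb:                                        ; BB
  //     %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
  //     br label %dest
  //   dest:                                      ; DestBB
  //     %q = phi i32 [ %p, %bb ], [ %c, %other ]
  //
  // %p is used only by the phi %q in dest, so BB is a candidate for merging.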
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (Value::use_const_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      const Instruction *User = cast<Instruction>(*UI);
      if (User->getParent() != DestBB || !isa<PHINode>(User))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (User->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(User))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block.  If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi nodes
/// and an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DOUT << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB;

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function.  If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DOUT << "AFTER:\n" << *DestBB << "\n\n\n";
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();

  DOUT << "AFTER:\n" << *DestBB << "\n\n\n";
}


/// SplitEdgeNicely - Split the critical edge from TI to its specified
/// successor if it will improve codegen.  We only do this if the successor has
/// phi nodes (otherwise critical edges are ok).  If there is already another
/// predecessor of the succ that is empty (and thus has no phi nodes), use it
/// instead of introducing a new block.
static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum,
                     SmallSet<std::pair<BasicBlock*,BasicBlock*>, 8> &BackEdges,
                            Pass *P) {
  BasicBlock *TIBB = TI->getParent();
  BasicBlock *Dest = TI->getSuccessor(SuccNum);
  assert(isa<PHINode>(Dest->begin()) &&
         "This should only be called if Dest has a PHI!");

  // As a hack, never split backedges of loops.  Even though the copy for any
  // PHIs inserted on the backedge would be dead for exits from the loop, we
  // assume that the cost of *splitting* the backedge would be too high.
  if (BackEdges.count(std::make_pair(TIBB, Dest)))
    return;

  if (!FactorCommonPreds) {
    /// TIPHIValues - This array is lazily computed to determine the values of
    /// PHIs in Dest that TI would provide.
    SmallVector<Value*, 32> TIPHIValues;

    // Check to see if Dest has any blocks that can be used as a split edge for
    // this terminator.
    for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
      BasicBlock *Pred = *PI;
      // To be usable, the pred has to end with an uncond branch to the dest.
      BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
      if (!PredBr || !PredBr->isUnconditional() ||
          // Must be empty other than the branch.
          &Pred->front() != PredBr ||
          // Cannot be the entry block; its label does not get emitted.
          Pred == &(Dest->getParent()->getEntryBlock()))
        continue;

      // Finally, since we know that Dest has phi nodes in it, we have to make
      // sure that jumping to Pred will have the same effect as going to Dest
      // in terms of PHI values.
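      // Concretely (illustrative): if Dest contains
      //   %p = phi i32 [ %x, %TIBB ], [ %y, %Pred ]
      // then Pred is only reusable when %x == %y; the loop below checks this
      // for every phi in Dest.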
      PHINode *PN;
      unsigned PHINo = 0;
      bool FoundMatch = true;
      for (BasicBlock::iterator I = Dest->begin();
           (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
        if (PHINo == TIPHIValues.size())
          TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));

        // If the PHI entry doesn't work, we can't use this pred.
        if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
          FoundMatch = false;
          break;
        }
      }

      // If we found a workable predecessor, change TI to branch to Succ.
      if (FoundMatch) {
        Dest->removePredecessor(TIBB);
        TI->setSuccessor(SuccNum, Pred);
        return;
      }
    }

    SplitCriticalEdge(TI, SuccNum, P, true);
    return;
  }

  PHINode *PN;
  SmallVector<Value*, 8> TIPHIValues;
  for (BasicBlock::iterator I = Dest->begin();
       (PN = dyn_cast<PHINode>(I)); ++I)
    TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));

  SmallVector<BasicBlock*, 8> IdenticalPreds;
  for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (BackEdges.count(std::make_pair(Pred, Dest)))
      continue;
    if (Pred == TIBB)
      IdenticalPreds.push_back(Pred);
    else {
      bool Identical = true;
      unsigned PHINo = 0;
      for (BasicBlock::iterator I = Dest->begin();
           (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo)
        if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
          Identical = false;
          break;
        }
      if (Identical)
        IdenticalPreds.push_back(Pred);
    }
  }

  assert(!IdenticalPreds.empty());
  SplitBlockPredecessors(Dest, &IdenticalPreds[0], IdenticalPreds.size(),
                         ".critedge", P);
}


/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, int->uint, or
/// int->sbyte on PPC), sink it into user blocks to reduce the number of
/// virtual registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
  // If this is a noop copy,
  MVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(CI->getType());

  // Is this an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to.  This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
    SrcVT = TLI.getTypeToTransformTo(SrcVT);
  if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
    DstVT = TLI.getTypeToTransformTo(DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
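    // (Illustrative note: a use of the cast in a phi such as
    //   %p = phi i32 [ %cast, %pred ], ...
    // conceptually occurs at the end of %pred, so that is where the sunk
    // copy must be available.)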
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(UI);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced.  This
/// is a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmps - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedCmp =
        CmpInst::Create(CI->getOpcode(), CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}

//===----------------------------------------------------------------------===//
// Addressing Mode Analysis and Optimization
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

/// IsNonLocalValue - Return true if the specified value is defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// OptimizeMemoryInst - Load and store instructions often have
/// addressing modes that can do significant amounts of computation.  As such,
/// instruction selection will try to get the load or store to do as much
/// computation as possible for the program.  The problem is that isel can only
/// see within a single block.  As such, we sink as much legal addressing mode
/// stuff into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands.
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        const Type *AccessTy,
                                        DenseMap<Value*,Value*> &SunkAddrs) {
  // Figure out what addressing mode will be built up for this operation.
  SmallVector<Instruction*, 16> AddrModeInsts;
  ExtAddrMode AddrMode = AddressingModeMatcher::Match(Addr, AccessTy,
                                                      MemoryInst,
                                                      AddrModeInsts, *TLI);

  // Check to see if any of the instructions subsumed by this addr mode are
  // non-local to I's BB.
  bool AnyNonLocal = false;
  for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
    if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
      AnyNonLocal = true;
      break;
    }
  }

  // If all the instructions matched are already in this BB, don't do anything.
  if (!AnyNonLocal) {
    DEBUG(cerr << "CGP: Found local addrmode: " << AddrMode << "\n");
    return false;
  }

  // Insert this computation right after this user.  Since our caller is
  // scanning from the top of the BB to the bottom, reuse of the expr is
  // guaranteed to happen later.
  BasicBlock::iterator InsertPt = MemoryInst;

  // Now that we've determined the addressing expression we want to use and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block.  If so,
  // reuse the computation.
  Value *&SunkAddr = SunkAddrs[Addr];
  if (SunkAddr) {
    DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
               << *MemoryInst);
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt);
  } else {
    DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
               << *MemoryInst);
    const Type *IntPtrTy = TLI->getTargetData()->getIntPtrType();

    Value *Result = 0;
    // Start with the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (isa<PointerType>(V->getType())) {
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = new TruncInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else {
        V = new SExtInst(V, IntPtrTy, "sunkaddr", InsertPt);
      }
      if (AddrMode.Scale != 1)
        V = BinaryOperator::CreateMul(V, ConstantInt::get(IntPtrTy,
                                                          AddrMode.Scale),
                                      "sunkaddr", InsertPt);
      Result = V;
    }

    // Add in the base register.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType() != IntPtrTy)
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the BaseGV if present.
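    // (Illustrative summary: taken together, the code above and below
    // materializes, in effect,
    //   sunkaddr = inttoptr(Scale*ScaledReg + BaseReg + BaseGV + BaseOffs)
    // with every term optional.  A global's address is a pointer, hence the
    // ptrtoint below.)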
    if (AddrMode.BaseGV) {
      Value *V = new PtrToIntInst(AddrMode.BaseGV, IntPtrTy, "sunkaddr",
                                  InsertPt);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    if (Result == 0)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = new IntToPtrInst(Result, Addr->getType(), "sunkaddr",
                                  InsertPt);
  }

  MemoryInst->replaceUsesOfWith(Addr, SunkAddr);

  if (Addr->use_empty())
    RecursivelyDeleteTriviallyDeadInstructions(Addr);
  return true;
}

/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeMemoryInst to sink their address computation into the block when
/// possible / profitable.
bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
                                           DenseMap<Value*,Value*> &SunkAddrs) {
  bool MadeChange = false;
  InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());

  // Do a prepass over the constraints, canonicalizing them, and building up
  // the ConstraintOperands list.
  std::vector<InlineAsm::ConstraintInfo>
    ConstraintInfos = IA->ParseConstraints();

  /// ConstraintOperands - Information about all of the constraints.
  std::vector<TargetLowering::AsmOperandInfo> ConstraintOperands;
  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
    ConstraintOperands.
      push_back(TargetLowering::AsmOperandInfo(ConstraintInfos[i]));
    TargetLowering::AsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.isIndirect)
        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue(),
                             OpInfo.ConstraintType == TargetLowering::C_Memory);

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = OpInfo.CallOperandVal;
      MadeChange |= OptimizeMemoryInst(I, OpVal, OpVal->getType(), SunkAddrs);
    }
  }

  return MadeChange;
}

bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If both the result of the {s|z}xt and its source are live out, rewrite all
  // other uses of the source with the result of the extension.
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
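    // (Illustrative note: unlike the cast/cmp sinking above, this scan only
    // needs to prove that the extension's result is live outside DefBB, so
    // it stops at the first out-of-block use.)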
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block.  Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    TheUse = InsertedTrunc;

    MadeChange = true;
  }

  return MadeChange;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Split all critical edges where the dest block has a PHI.
  TerminatorInst *BBTI = BB.getTerminator();
  if (BBTI->getNumSuccessors() > 1) {
    for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i) {
      BasicBlock *SuccBB = BBTI->getSuccessor(i);
      if (isa<PHINode>(SuccBB->begin()) && isCriticalEdge(BBTI, i, true))
        SplitEdgeNicely(BBTI, i, BackEdges, this);
    }
  }

  // Keep track of non-local addresses that have been sunk into this block.
  // This allows us to avoid inserting duplicate code for blocks with multiple
  // load/stores of the same address.
  DenseMap<Value*, Value*> SunkAddrs;

  for (BasicBlock::iterator BBI = BB.begin(), E = BB.end(); BBI != E; ) {
    Instruction *I = BBI++;

    if (CastInst *CI = dyn_cast<CastInst>(I)) {
      // If the source of the cast is a constant, then this should have
      // already been constant folded.  The only reason NOT to constant fold
      // it is if something (e.g. LSR) was careful to place the constant
      // evaluation in a block other than the one that uses it (e.g. to hoist
      // the address of globals out of a loop).  If this is the case, we don't
      // want to forward-subst the cast.
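      // For example (illustrative): a cast such as
      //   %p = bitcast [100 x i32]* @G to i32*
      // may have been deliberately placed in a loop preheader; sinking it
      // back into every using block would undo that hoisting.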
      if (isa<Constant>(CI->getOperand(0)))
        continue;

      bool Change = false;
      if (TLI) {
        Change = OptimizeNoopCopyExpression(CI, *TLI);
        MadeChange |= Change;
      }

      if (!Change && (isa<ZExtInst>(I) || isa<SExtInst>(I)))
        MadeChange |= OptimizeExtUses(I);
    } else if (CmpInst *CI = dyn_cast<CmpInst>(I)) {
      MadeChange |= OptimizeCmpExpression(CI);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      if (TLI)
        MadeChange |= OptimizeMemoryInst(I, I->getOperand(0), LI->getType(),
                                         SunkAddrs);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (TLI)
        MadeChange |= OptimizeMemoryInst(I, SI->getOperand(1),
                                         SI->getOperand(0)->getType(),
                                         SunkAddrs);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      if (GEPI->hasAllZeroIndices()) {
        /// The GEP operand must be a pointer, so must its result -> BitCast
        Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                          GEPI->getName(), GEPI);
        GEPI->replaceAllUsesWith(NC);
        GEPI->eraseFromParent();
        MadeChange = true;
        BBI = NC;
      }
    } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
      // If we found an inline asm expression, and if the target knows how to
      // lower it to normal LLVM code, do so now.
      if (TLI && isa<InlineAsm>(CI->getCalledValue()))
        if (const TargetAsmInfo *TAI =
              TLI->getTargetMachine().getTargetAsmInfo()) {
          if (TAI->ExpandInlineAsm(CI)) {
            BBI = BB.begin();
            // Avoid processing instructions out of order, which could cause
            // reuse before a value is defined.
            SunkAddrs.clear();
          } else
            // Sink address computing for memory operands into the block.
            MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
        }
    }
  }

  return MadeChange;
}
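// Usage sketch (illustrative, assuming an LLVMTargetMachine-style codegen
// pipeline): the pass is normally scheduled just before instruction
// selection, with the target's TargetLowering supplied so the profitability
// queries above can be answered, e.g.:
//
//   PM.add(createCodeGenPreparePass(getTargetLowering()));
//
// It can also be run standalone via "opt -codegenprepare"; in that case TLI
// is null and the TLI-dependent transformations are skipped.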