CodeGenPrepare.cpp revision ae73dc1448d25b02cabc7c64c86c64371453dda8
//===- CodeGenPrepare.cpp - Prepare a function for code generation -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "codegenprepare"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
using namespace llvm;

namespace {
  class VISIBILITY_HIDDEN CodeGenPrepare : public FunctionPass {
    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetLowering *tli = 0)
      : FunctionPass(&ID), TLI(tli) {}
    bool runOnFunction(Function &F);

  private:
    bool EliminateMostlyEmptyBlocks(Function &F);
    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void EliminateMostlyEmptyBlock(BasicBlock *BB);
    bool OptimizeBlock(BasicBlock &BB);
    bool OptimizeLoadStoreInst(Instruction *I, Value *Addr,
                               const Type *AccessTy,
                               DenseMap<Value*,Value*> &SunkAddrs);
    bool OptimizeInlineAsmInst(Instruction *I, CallSite CS,
                               DenseMap<Value*,Value*> &SunkAddrs);
    bool OptimizeExtUses(Instruction *I);
  };
}

char CodeGenPrepare::ID = 0;
static RegisterPass<CodeGenPrepare> X("codegenprepare",
                                      "Optimize for code generation");

FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
  return new CodeGenPrepare(TLI);
}


bool CodeGenPrepare::runOnFunction(Function &F) {
  bool EverMadeChange = false;

  // First pass, eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      MadeChange |= OptimizeBlock(*BB);
    EverMadeChange |= MadeChange;
  }
  return EverMadeChange;
}

/// EliminateMostlyEmptyBlocks - Eliminate blocks that contain only PHI nodes
/// and an unconditional branch.  Passes before isel (e.g. LSR/loopsimplify)
/// often split edges in ways that are non-optimal for isel.  Start by
/// eliminating these blocks so we can split them the way we want them.
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
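  // For illustration (a sketch, names hypothetical), a "mostly empty" block
  // eligible for elimination looks like:
  //
  //   bb:                             ; preds = %entry, %loop
  //     %p = phi i32 [ 0, %entry ], [ %v, %loop ]
  //     br label %dest
  //
  // Folding bb into dest removes a branch that isel, working one block at a
  // time, could not otherwise see past.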
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch isn't a phi node, then other stuff
    // is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      if (!isa<PHINode>(BBI)) continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor.  If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (Value::use_const_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      const Instruction *User = cast<Instruction>(*UI);
      if (User->getParent() != DestBB || !isa<PHINode>(User))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value.  If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (User->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(User))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block.  If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

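        // For illustration (a sketch): if Pred branches to both BB and DestBB,
        // then after the merge DestBB's PHI keeps a single entry for Pred, so
        // the value arriving directly from Pred (V1) must agree with the value
        // that would have arrived by way of BB (V2).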
        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi's and
/// an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DOUT << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB;

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (DestBB->getSinglePredecessor()) {
    // If DestBB has single-entry PHI nodes, fold them.
    while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
      PN->replaceAllUsesWith(PN->getIncomingValue(0));
      PN->eraseFromParent();
    }

    // Splice all the PHI nodes from BB over to DestBB.
    DestBB->getInstList().splice(DestBB->begin(), BB->getInstList(),
                                 BB->begin(), BI);

    // Anything that branched to BB now branches to DestBB.
    BB->replaceAllUsesWith(DestBB);

    // Nuke BB.
    BB->eraseFromParent();

    DOUT << "AFTER:\n" << *DestBB << "\n\n\n";
    return;
  }

  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();

  DOUT << "AFTER:\n" << *DestBB << "\n\n\n";
}


/// SplitEdgeNicely - Split the critical edge from TI to its specified
/// successor if it will improve codegen.  We only do this if the successor has
/// phi nodes (otherwise critical edges are ok).  If there is already another
/// predecessor of the succ that is empty (and thus has no phi nodes), use it
/// instead of introducing a new block.
static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum, Pass *P) {
  BasicBlock *TIBB = TI->getParent();
  BasicBlock *Dest = TI->getSuccessor(SuccNum);
  assert(isa<PHINode>(Dest->begin()) &&
         "This should only be called if Dest has a PHI!");

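  // For illustration (a sketch): a critical edge runs from a multi-successor
  // terminator to a multi-predecessor block.  Rather than always inserting a
  // fresh block on the edge, we may retarget TI to an existing empty
  // predecessor of Dest that provides the same PHI values, e.g. redirecting
  // one arm of "br i1 %c, label %dest, label %other" to a block that does
  // nothing but "br label %dest".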
  // As a hack, never split backedges of loops.  Even though the copy for any
  // PHIs inserted on the backedge would be dead for exits from the loop, we
  // assume that the cost of *splitting* the backedge would be too high.
  if (Dest == TIBB)
    return;

  /// TIPHIValues - This array is lazily computed to determine the values of
  /// PHIs in Dest that TI would provide.
  SmallVector<Value*, 32> TIPHIValues;

  // Check to see if Dest has any blocks that can be used as a split edge for
  // this terminator.
  for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    // To be usable, the pred has to end with an uncond branch to the dest.
    BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
    if (!PredBr || !PredBr->isUnconditional() ||
        // Must be empty other than the branch.
        &Pred->front() != PredBr ||
        // Cannot be the entry block; its label does not get emitted.
        Pred == &(Dest->getParent()->getEntryBlock()))
      continue;

    // Finally, since we know that Dest has phi nodes in it, we have to make
    // sure that jumping to Pred will have the same effect as going to Dest in
    // terms of PHI values.
    PHINode *PN;
    unsigned PHINo = 0;
    bool FoundMatch = true;
    for (BasicBlock::iterator I = Dest->begin();
         (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
      if (PHINo == TIPHIValues.size())
        TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));

      // If the PHI entry doesn't work, we can't use this pred.
      if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
        FoundMatch = false;
        break;
      }
    }

    // If we found a workable predecessor, change TI to branch to Succ.
    if (FoundMatch) {
      Dest->removePredecessor(TIBB);
      TI->setSuccessor(SuccNum, Pred);
      return;
    }
  }

  SplitCriticalEdge(TI, SuccNum, P, true);
}

/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, int->uint, or
/// int->sbyte on PPC), sink it into user blocks to reduce the number of
/// virtual registers that must be created and coalesced.
///
/// Return true if any changes are made.
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
  // If this is a noop copy,
  MVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(CI->getType());

  // This is an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to.  This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
    SrcVT = TLI.getTypeToTransformTo(SrcVT);
  if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
    DstVT = TLI.getTypeToTransformTo(DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  BasicBlock *DefBB = CI->getParent();

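  // For illustration (a sketch): a noop bitcast defined here but used in other
  // blocks would otherwise become a cross-block virtual register copy.
  // Sinking a clone of the cast into each user block, e.g.
  //   userbb:
  //     %c = bitcast i8* %p to i32*   ; clone of the cast in DefBB
  //     %v = load i32* %c
  // lets isel fold the cast away locally in each block that uses it.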
  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      unsigned OpVal = UI.getOperandNo()/2;
      UserBB = PN->getIncomingBlock(OpVal);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// OptimizeCmpExpression - Sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced.  This
/// is a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool OptimizeCmpExpression(CmpInst *CI) {

  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmps - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedCmp =
        CmpInst::Create(CI->getOpcode(), CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}

/// EraseDeadInstructions - Erase any dead instructions.
static void EraseDeadInstructions(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !I->use_empty()) return;

  SmallPtrSet<Instruction*, 16> Insts;
  Insts.insert(I);

  while (!Insts.empty()) {
    I = *Insts.begin();
    Insts.erase(I);
    if (isInstructionTriviallyDead(I)) {
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
        if (Instruction *U = dyn_cast<Instruction>(I->getOperand(i)))
          Insts.insert(U);
      I->eraseFromParent();
    }
  }
}

namespace {

/// ExtAddrMode - This is an extended version of TargetLowering::AddrMode which
/// holds actual Value*'s for register values.
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg;
  Value *ScaledReg;
  ExtAddrMode() : BaseReg(0), ScaledReg(0) {}
  void dump() const;
};

static std::ostream &operator<<(std::ostream &OS, const ExtAddrMode &AM) {
  bool NeedPlus = false;
  OS << "[";
  if (AM.BaseGV)
    OS << (NeedPlus ? " + " : "")
       << "GV:%" << AM.BaseGV->getName(), NeedPlus = true;

  if (AM.BaseOffs)
    OS << (NeedPlus ? " + " : "") << AM.BaseOffs, NeedPlus = true;

  if (AM.BaseReg)
    OS << (NeedPlus ? " + " : "")
       << "Base:%" << AM.BaseReg->getName(), NeedPlus = true;
  if (AM.Scale)
    OS << (NeedPlus ? " + " : "")
       << AM.Scale << "*%" << AM.ScaledReg->getName(), NeedPlus = true;

  return OS << "]";
}

void ExtAddrMode::dump() const {
  cerr << *this << "\n";
}

}

static bool TryMatchingScaledValue(Value *ScaleReg, int64_t Scale,
                                   const Type *AccessTy, ExtAddrMode &AddrMode,
                                   SmallVector<Instruction*, 16> &AddrModeInsts,
                                   const TargetLowering &TLI, unsigned Depth);

/// FindMaximalLegalAddressingMode - If we can, try to merge the computation of
/// Addr into the specified addressing mode.  If Addr can't be added to
/// AddrMode this returns false.  This assumes that Addr is either a pointer
/// type or intptr_t for the target.
static bool FindMaximalLegalAddressingMode(Value *Addr, const Type *AccessTy,
                                           ExtAddrMode &AddrMode,
                                   SmallVector<Instruction*, 16> &AddrModeInsts,
                                           const TargetLowering &TLI,
                                           unsigned Depth) {

  // If this is a global variable, fold it into the addressing mode if possible.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    if (AddrMode.BaseGV == 0) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
        return true;
      AddrMode.BaseGV = 0;
    }
  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (isa<ConstantPointerNull>(Addr)) {
    return true;
  }

  // Look through constant exprs and instructions.
  unsigned Opcode = ~0U;
  User *AddrInst = 0;
  if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    Opcode = I->getOpcode();
    AddrInst = I;
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    Opcode = CE->getOpcode();
    AddrInst = CE;
  }

  // Limit recursion to avoid exponential behavior.
  if (Depth == 5) { AddrInst = 0; Opcode = ~0U; }

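  // For illustration (a sketch): on a target supporting
  // [BaseGV + BaseReg + Scale*ScaledReg + BaseOffs] addressing (e.g. x86), an
  // address like "getelementptr @GV, 0, %i" over i32 elements could match as
  // BaseGV = @GV, ScaledReg = %i, Scale = 4, with each partial match below
  // re-validated through TLI.isLegalAddressingMode.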
  // If this is really an instruction, add it to our list of related
  // instructions.
  if (Instruction *I = dyn_cast_or_null<Instruction>(AddrInst))
    AddrModeInsts.push_back(I);

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    if (FindMaximalLegalAddressingMode(AddrInst->getOperand(0), AccessTy,
                                       AddrMode, AddrModeInsts, TLI, Depth))
      return true;
    break;
  case Instruction::IntToPtr:
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
        TLI.getPointerTy()) {
      if (FindMaximalLegalAddressingMode(AddrInst->getOperand(0), AccessTy,
                                         AddrMode, AddrModeInsts, TLI, Depth))
        return true;
    }
    break;
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    if (FindMaximalLegalAddressingMode(AddrInst->getOperand(1), AccessTy,
                                       AddrMode, AddrModeInsts, TLI, Depth+1) &&
        FindMaximalLegalAddressingMode(AddrInst->getOperand(0), AccessTy,
                                       AddrMode, AddrModeInsts, TLI, Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);

    // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
    if (FindMaximalLegalAddressingMode(AddrInst->getOperand(0), AccessTy,
                                       AddrMode, AddrModeInsts, TLI, Depth+1) &&
        FindMaximalLegalAddressingMode(AddrInst->getOperand(1), AccessTy,
                                       AddrMode, AddrModeInsts, TLI, Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    break;
  }
  case Instruction::Or: {
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS) break;
    // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
    break;
  }
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C, and can only handle this when the scale
    // field is available.
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS) break;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1 << Scale;

    if (TryMatchingScaledValue(AddrInst->getOperand(0), Scale, AccessTy,
                               AddrMode, AddrModeInsts, TLI, Depth))
      return true;
    break;
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP.  We check whether it contains constant offsets and at most
    // one variable offset.
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    const TargetData *TD = TLI.getTargetData();
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx =
          cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = TD->getABITypeSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) {  // Scales of zero don't do anything.
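          // For illustration (a sketch): in "getelementptr [10 x i32]* %A,
          // i32 0, i32 %i", %i is a variable index with TypeSize 4, so it can
          // become the scaled register with Scale = 4.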
          // We only allow one variable index at the moment.
          if (VariableOperand != -1) {
            VariableOperand = -2;
            break;
          }

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // If the GEP had multiple variable indices, punt.
    if (VariableOperand == -2)
      break;

    // A common case is for the GEP to only do a constant offset.  In this
    // case, just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)){
        // Check to see if we can fold the base pointer in too.
        if (FindMaximalLegalAddressingMode(AddrInst->getOperand(0), AccessTy,
                                           AddrMode, AddrModeInsts, TLI,
                                           Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
    } else {
      // Check that this has no base reg yet.  If so, we won't have a place to
      // put the base of the GEP (assuming it is not a null ptr).
      bool SetBaseReg = false;
      if (AddrMode.HasBaseReg) {
        if (!isa<ConstantPointerNull>(AddrInst->getOperand(0)))
          break;
      } else {
        AddrMode.HasBaseReg = true;
        AddrMode.BaseReg = AddrInst->getOperand(0);
        SetBaseReg = true;
      }

      // See if the scale amount is valid for this target.
      AddrMode.BaseOffs += ConstantOffset;
      if (TryMatchingScaledValue(AddrInst->getOperand(VariableOperand),
                                 VariableScale, AccessTy, AddrMode,
                                 AddrModeInsts, TLI, Depth)) {
        if (!SetBaseReg) return true;

        // If this match succeeded, we know that we can form an address with
        // the GepBase as the basereg.  See if we can match *more*.
        AddrMode.HasBaseReg = false;
        AddrMode.BaseReg = 0;
        if (FindMaximalLegalAddressingMode(AddrInst->getOperand(0), AccessTy,
                                           AddrMode, AddrModeInsts, TLI,
                                           Depth+1))
          return true;
        // Strange, shouldn't happen.  Restore the base reg and succeed the
        // easy way.
        AddrMode.HasBaseReg = true;
        AddrMode.BaseReg = AddrInst->getOperand(0);
        return true;
      }

      AddrMode.BaseOffs -= ConstantOffset;
      if (SetBaseReg) {
        AddrMode.HasBaseReg = false;
        AddrMode.BaseReg = 0;
      }
    }
    break;
  }
  }

  if (Instruction *I = dyn_cast_or_null<Instruction>(AddrInst)) {
    assert(AddrModeInsts.back() == I && "Stack imbalance"); I = I;
    AddrModeInsts.pop_back();
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
      AddrMode.BaseReg = Addr;
      return true;
    }
    AddrMode.HasBaseReg = false;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
      AddrMode.ScaledReg = Addr;
      return true;
    }
    AddrMode.Scale = 0;
  }
  // Couldn't match.
  return false;
}

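// For illustration (a sketch): on a target with [Base + Scale*Index]
// addressing, matching "%a = mul i64 %x, 4" as part of an address sets
// Scale = 4 and ScaledReg = %x; the helper below also recognizes ScaleReg
// itself being "X + C" and folds C*Scale into the constant offset.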
/// TryMatchingScaledValue - Try adding ScaleReg*Scale to the specified
/// addressing mode.  Return true if this addr mode is legal for the target,
/// false if not.
static bool TryMatchingScaledValue(Value *ScaleReg, int64_t Scale,
                                   const Type *AccessTy, ExtAddrMode &AddrMode,
                                   SmallVector<Instruction*, 16> &AddrModeInsts,
                                   const TargetLowering &TLI, unsigned Depth) {
  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode InputAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
  // [A+B + A*7] -> [B+A*8].
  AddrMode.Scale += Scale;
  AddrMode.ScaledReg = ScaleReg;

  if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
    // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
    // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
    // X*Scale + C*Scale to addr mode.
    BinaryOperator *BinOp = dyn_cast<BinaryOperator>(ScaleReg);
    if (BinOp && BinOp->getOpcode() == Instruction::Add &&
        isa<ConstantInt>(BinOp->getOperand(1)) &&
        InputAddrMode.ScaledReg == 0) {

      InputAddrMode.Scale = Scale;
      InputAddrMode.ScaledReg = BinOp->getOperand(0);
      InputAddrMode.BaseOffs +=
        cast<ConstantInt>(BinOp->getOperand(1))->getSExtValue()*Scale;
      if (TLI.isLegalAddressingMode(InputAddrMode, AccessTy)) {
        AddrModeInsts.push_back(BinOp);
        AddrMode = InputAddrMode;
        return true;
      }
    }

    // Otherwise, not (x+c)*scale, just return what we have.
    return true;
  }

  // Otherwise, back this attempt out.
  AddrMode.Scale -= Scale;
  if (AddrMode.Scale == 0) AddrMode.ScaledReg = 0;

  return false;
}


/// IsNonLocalValue - Return true if the specified value is defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// OptimizeLoadStoreInst - Load and store instructions often have addressing
/// modes that can do significant amounts of computation.  As such, instruction
/// selection will try to get the load or store to do as much computation as
/// possible for the program.  The problem is that isel can only see within a
/// single block.  As such, we sink as much legal addressing mode stuff into
/// the block as possible.
bool CodeGenPrepare::OptimizeLoadStoreInst(Instruction *LdStInst, Value *Addr,
                                           const Type *AccessTy,
                                           DenseMap<Value*,Value*> &SunkAddrs) {
  // Figure out what addressing mode will be built up for this operation.
  SmallVector<Instruction*, 16> AddrModeInsts;
  ExtAddrMode AddrMode;
  bool Success = FindMaximalLegalAddressingMode(Addr, AccessTy, AddrMode,
                                                AddrModeInsts, *TLI, 0);
  Success = Success; assert(Success && "Couldn't select *anything*?");

  // Check to see if any of the instructions subsumed by this addr mode are
  // non-local to I's BB.
  bool AnyNonLocal = false;
  for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
    if (IsNonLocalValue(AddrModeInsts[i], LdStInst->getParent())) {
      AnyNonLocal = true;
      break;
    }
  }

  // If all the instructions matched are already in this BB, don't do anything.
  if (!AnyNonLocal) {
    DEBUG(cerr << "CGP: Found local addrmode: " << AddrMode << "\n");
    return false;
  }

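  // For illustration (a sketch): if AddrMode is [Base:%p + 4*%i + 8], the
  // code below reconstructs it here as explicit IR, roughly
  //   ptrtoint, mul, add ... followed by an inttoptr back to Addr's type,
  // so isel sees the whole expression in this block and can fold it into the
  // load/store's addressing mode.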
  // Insert this computation right after this user.  Since our caller is
  // scanning from the top of the BB to the bottom, reuses of the expr are
  // guaranteed to happen later.
  BasicBlock::iterator InsertPt = LdStInst;

  // Now that we've determined the addressing expression we want to use and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block.  If so,
  // reuse the computation.
  Value *&SunkAddr = SunkAddrs[Addr];
  if (SunkAddr) {
    DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << "\n");
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt);
  } else {
    DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << "\n");
    const Type *IntPtrTy = TLI->getTargetData()->getIntPtrType();

    Value *Result = 0;
    // Start with the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (isa<PointerType>(V->getType())) {
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = new TruncInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else {
        V = new SExtInst(V, IntPtrTy, "sunkaddr", InsertPt);
      }
      if (AddrMode.Scale != 1)
        V = BinaryOperator::CreateMul(V, ConstantInt::get(IntPtrTy,
                                                          AddrMode.Scale),
                                      "sunkaddr", InsertPt);
      Result = V;
    }

    // Add in the base register.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType() != IntPtrTy)
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = new PtrToIntInst(AddrMode.BaseGV, IntPtrTy, "sunkaddr",
                                  InsertPt);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    if (Result == 0)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = new IntToPtrInst(Result, Addr->getType(), "sunkaddr",
                                  InsertPt);
  }

  LdStInst->replaceUsesOfWith(Addr, SunkAddr);

  if (Addr->use_empty())
    EraseDeadInstructions(Addr);
  return true;
}

/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeLoadStoreInst to sink their address computation into the block when
/// possible / profitable.
bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
                                           DenseMap<Value*,Value*> &SunkAddrs) {
  bool MadeChange = false;
  InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());

  // Do a prepass over the constraints, canonicalizing them, and building up
  // the ConstraintOperands list.
  std::vector<InlineAsm::ConstraintInfo>
    ConstraintInfos = IA->ParseConstraints();

  /// ConstraintOperands - Information about all of the constraints.
  std::vector<TargetLowering::AsmOperandInfo> ConstraintOperands;
  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
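  // For illustration (a sketch): an indirect memory operand such as
  //   call void asm "movl $1, $0", "=*m,r"(i32* %addr, i32 %val)
  // carries an address (%addr) whose computation can be sunk into this block
  // just like a load/store address.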
  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
    ConstraintOperands.
      push_back(TargetLowering::AsmOperandInfo(ConstraintInfos[i]));
    TargetLowering::AsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.isIndirect)
        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = OpInfo.CallOperandVal;
      MadeChange |= OptimizeLoadStoreInst(I, OpVal, OpVal->getType(),
                                          SunkAddrs);
    }
  }

  return MadeChange;
}

bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If both the result of the {s|z}xt and its source are live out, rewrite
  // all other uses of the source with the result of the extension.
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative.  We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block once.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block.  Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
    }

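    // For illustration (a sketch): with "%e = sext i32 %src to i64" in DefBB
    // and a use of %src in UserBB, the use is rewritten to
    //   %t = trunc i64 %e to i32
    // so only %e, not both %e and %src, needs to be live out of DefBB.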
    // Replace a use of the {s|z}ext source with a use of the result.
    TheUse = InsertedTrunc;

    MadeChange = true;
  }

  return MadeChange;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Split all critical edges where the dest block has a PHI and where the phi
  // has shared immediate operands.
  TerminatorInst *BBTI = BB.getTerminator();
  if (BBTI->getNumSuccessors() > 1) {
    for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i)
      if (isa<PHINode>(BBTI->getSuccessor(i)->begin()) &&
          isCriticalEdge(BBTI, i, true))
        SplitEdgeNicely(BBTI, i, this);
  }


  // Keep track of non-local addresses that have been sunk into this block.
  // This allows us to avoid inserting duplicate code for blocks with multiple
  // load/stores of the same address.
  DenseMap<Value*, Value*> SunkAddrs;

  for (BasicBlock::iterator BBI = BB.begin(), E = BB.end(); BBI != E; ) {
    Instruction *I = BBI++;

    if (CastInst *CI = dyn_cast<CastInst>(I)) {
      // If the source of the cast is a constant, then this should have
      // already been constant folded.  The only reason NOT to constant fold
      // it is if something (e.g. LSR) was careful to place the constant
      // evaluation in a block other than the one that uses it (e.g. to hoist
      // the address of globals out of a loop).  If this is the case, we don't
      // want to forward-subst the cast.
      if (isa<Constant>(CI->getOperand(0)))
        continue;

      bool Change = false;
      if (TLI) {
        Change = OptimizeNoopCopyExpression(CI, *TLI);
        MadeChange |= Change;
      }

      if (!Change && (isa<ZExtInst>(I) || isa<SExtInst>(I)))
        MadeChange |= OptimizeExtUses(I);
    } else if (CmpInst *CI = dyn_cast<CmpInst>(I)) {
      MadeChange |= OptimizeCmpExpression(CI);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      if (TLI)
        MadeChange |= OptimizeLoadStoreInst(I, I->getOperand(0), LI->getType(),
                                            SunkAddrs);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (TLI)
        MadeChange |= OptimizeLoadStoreInst(I, SI->getOperand(1),
                                            SI->getOperand(0)->getType(),
                                            SunkAddrs);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      if (GEPI->hasAllZeroIndices()) {
        /// The GEP operand must be a pointer, so must its result -> BitCast
        Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                          GEPI->getName(), GEPI);
        GEPI->replaceAllUsesWith(NC);
        GEPI->eraseFromParent();
        MadeChange = true;
        BBI = NC;
      }
    } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
      // If we found an inline asm expression, and if the target knows how to
      // lower it to normal LLVM code, do so now.
      if (TLI && isa<InlineAsm>(CI->getCalledValue()))
        if (const TargetAsmInfo *TAI =
              TLI->getTargetMachine().getTargetAsmInfo()) {
          if (TAI->ExpandInlineAsm(CI))
            BBI = BB.begin();
          else
            // Sink address computing for memory operands into the block.
            MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
        }
    }
  }

  return MadeChange;
}
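
// Usage sketch (not part of the original file): a client would typically
// schedule this pass right before instruction selection.  A minimal example,
// assuming an existing FunctionPassManager PM and a TargetMachine TM:
//
//   PM.add(createCodeGenPreparePass(TM.getTargetLowering()));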