CodeGenPrepare.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "codegenprepare"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim,   "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim,   "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses,   "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup,   "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumAndCmpsMoved, "Number of and/cmp's pushed into branches");

static cl::opt<bool> DisableBranchOpts(
  "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
  cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
  "disable-cgp-select2branch", cl::Hidden, cl::init(false),
  cl::desc("Disable select to branch conversion."));

static cl::opt<bool> EnableAndCmpSinking(
  "enable-andcmp-sinking", cl::Hidden, cl::init(true),
  cl::desc("Enable sinking and/cmp into branches."));

namespace {
typedef SmallPtrSet<Instruction *, 16> SetOfInstrs;
typedef DenseMap<Instruction *, Type *> InstrToOrigTy;

  class CodeGenPrepare : public FunctionPass {
    /// TLI - Keep a pointer of a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetMachine *TM;
    const TargetLowering *TLI;
    const TargetLibraryInfo *TLInfo;
    DominatorTree *DT;

    /// CurInstIterator - As we scan instructions optimizing them, this is the
    /// next instruction to optimize. Xforms that can invalidate this should
    /// update it.
    BasicBlock::iterator CurInstIterator;

    /// Keeps track of non-local addresses that have been sunk into a block.
    /// This allows us to avoid inserting duplicate code for blocks with
    /// multiple load/stores of the same address.
    ValueMap<Value*, Value*> SunkAddrs;

    /// Keeps track of all truncates inserted for the current function.
    SetOfInstrs InsertedTruncsSet;
    /// Keeps track of the type of each instruction before its promotion,
    /// for the current function.
    InstrToOrigTy PromotedInsts;

    /// ModifiedDT - If the CFG is modified in any way, the dominator tree may
    /// need to be updated.
    bool ModifiedDT;

    /// OptSize - True if optimizing for size.
    bool OptSize;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetMachine *TM = 0)
      : FunctionPass(ID), TM(TM), TLI(0) {
        initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
      }
    bool runOnFunction(Function &F) override;

    const char *getPassName() const override { return "CodeGen Prepare"; }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addRequired<TargetLibraryInfo>();
    }

  private:
    bool EliminateFallThrough(Function &F);
    bool EliminateMostlyEmptyBlocks(Function &F);
    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void EliminateMostlyEmptyBlock(BasicBlock *BB);
    bool OptimizeBlock(BasicBlock &BB);
    bool OptimizeInst(Instruction *I);
    bool OptimizeMemoryInst(Instruction *I, Value *Addr, Type *AccessTy);
    bool OptimizeInlineAsmInst(CallInst *CS);
    bool OptimizeCallInst(CallInst *CI);
    bool MoveExtToFormExtLoad(Instruction *I);
    bool OptimizeExtUses(Instruction *I);
    bool OptimizeSelectInst(SelectInst *SI);
    bool OptimizeShuffleVectorInst(ShuffleVectorInst *SI);
    bool DupRetToEnableTailCallOpts(BasicBlock *BB);
    bool PlaceDbgValues(Function &F);
    bool sinkAndCmp(Function &F);
  };
}

char CodeGenPrepare::ID = 0;
static void *initializeCodeGenPreparePassOnce(PassRegistry &Registry) {
  initializeTargetLibraryInfoPass(Registry);
  PassInfo *PI = new PassInfo(
      "Optimize for code generation", "codegenprepare", &CodeGenPrepare::ID,
      PassInfo::NormalCtor_t(callDefaultCtor<CodeGenPrepare>), false, false,
      PassInfo::TargetMachineCtor_t(callTargetMachineCtor<CodeGenPrepare>));
  Registry.registerPass(*PI, true);
  return PI;
}

void llvm::initializeCodeGenPreparePass(PassRegistry &Registry) {
  CALL_ONCE_INITIALIZATION(initializeCodeGenPreparePassOnce)
}

FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
  return new CodeGenPrepare(TM);
}

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedTruncsSet.clear();
  PromotedInsts.clear();

  ModifiedDT = false;
  if (TM) TLI = TM->getTargetLowering();
  TLInfo = &getAnalysis<TargetLibraryInfo>();
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : 0;
  OptSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::OptimizeForSize);

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
       TLI->getBypassSlowDivWidths();
    for (Function::iterator I = F.begin(); I != F.end(); I++)
      EverMadeChange |= bypassSlowDivision(F, I, BypassWidths);
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then iSel may not be able
  // to handle it properly. iSel will drop llvm.dbg.value if it can not
  // find a node corresponding to the value.
  EverMadeChange |= PlaceDbgValues(F);

  // If there is a mask, compare against zero, and branch that can be combined
  // into a single target instruction, push the mask and compare into branch
  // users. Do this before OptimizeBlock -> OptimizeInst ->
  // OptimizeCmpExpression, which perturbs the pattern being searched for.
  if (!DisableBranchOpts)
    EverMadeChange |= sinkAndCmp(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = I++;
      MadeChange |= OptimizeBlock(*BB);
    }
    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
      MadeChange |= ConstantFoldTerminator(BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= EliminateFallThrough(F);

    if (MadeChange)
      ModifiedDT = true;
    EverMadeChange |= MadeChange;
  }

  if (ModifiedDT && DT)
    DT->recalculate(F);

  return EverMadeChange;
}

/// EliminateFallThrough - Merge basic blocks which are connected
/// by a single edge, where one of the basic blocks has a single successor
/// pointing to the other basic block, which has a single predecessor.
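/// For example (illustrative IR, not from a test case):
/// @code
/// bb0:
///   ...
///   br label %bb1
/// bb1:                                ; preds = %bb0
///   ...
/// @endcode
/// is collapsed into a single basic block.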
bool CodeGenPrepare::EliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n" << *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB;
    }
  }
  return Changed;
}

/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch. Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel. Start by eliminating these blocks so we can split them the way we
/// want them.
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (const User *U : PN->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only PHIs and
/// an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function.  If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
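    // For example (illustrative): if BB has predecessors P0 and P1 and
    //   InVal = phi i32 [ %a, %P0 ], [ %b, %P1 ]
    // is defined in BB, then PN gains the entries [ %a, %P0 ], [ %b, %P1 ];
    // otherwise PN gains one copy of InVal per predecessor edge of BB.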
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  if (DT && !ModifiedDT) {
    BasicBlock *BBIDom = DT->getNode(BB)->getIDom()->getBlock();
    BasicBlock *DestBBIDom = DT->getNode(DestBB)->getIDom()->getBlock();
    BasicBlock *NewIDom = DT->findNearestCommonDominator(BBIDom, DestBBIDom);
    DT->changeImmediateDominator(DestBB, NewIDom);
    DT->eraseNode(BB);
  }
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

/// SinkCast - Sink the specified cast instruction into its user blocks.
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // Is this an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to.  This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}

/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced.  This is
/// a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmp - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCmp =
        CmpInst::Create(CI->getOpcode(),
                        CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}

namespace {
class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
protected:
  void replaceCall(Value *With) override {
    CI->replaceAllUsesWith(With);
    CI->eraseFromParent();
  }
  bool isFoldable(unsigned SizeCIOp, unsigned, bool) const override {
    if (ConstantInt *SizeCI =
                           dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
      return SizeCI->isAllOnesValue();
    return false;
  }
};
} // end anonymous namespace

bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

    // Substituting this can cause recursive simplifications, which can
    // invalidate our iterator.  Use a WeakVH to hold onto it in case this
    // happens.
    WeakVH IterHandle(CurInstIterator);

    replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getDataLayout() : 0,
                                  TLInfo, ModifiedDT ? 0 : DT);

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurInstIterator) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
    return true;
  }

  if (II && TLI) {
    SmallVector<Value*, 2> PtrOps;
    Type *AccessTy;
    if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy))
      while (!PtrOps.empty())
        if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy))
          return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need DataLayout from here on out.
  const DataLayout *TD = TLI ? TLI->getDataLayout() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize.  Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD, TLInfo);
}

/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::DupRetToEnableTailCallOpts(BasicBlock *BB) {
  if (!TLI)
    return false;

  ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RI)
    return false;

  PHINode *PN = 0;
  BitCastInst *BCI = 0;
  Value *V = RI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    PN = dyn_cast<PHINode>(V);
    if (!PN)
      return false;
  }

  if (PN && PN->getParent() != BB)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  // See llvm::isInTailCallPosition().
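  // For instance (illustrative), if the caller is declared to return a
  // zeroext/signext value, the extension would have to happen after each
  // duplicated call, so the calls could no longer be emitted as tail calls.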
  const Function *F = BB->getParent();
  AttributeSet CallerAttrs = F->getAttributes();
  if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
    return false;

  // Make sure there are no instructions between the PHI and return, or that the
  // return is the first instruction in the block.
  if (PN) {
    BasicBlock::iterator BI = BB->begin();
    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
    if (&*BI == BCI)
      // Also skip over the bitcast.
      ++BI;
    if (&*BI != RI)
      return false;
  } else {
    BasicBlock::iterator BI = BB->begin();
    while (isa<DbgInfoIntrinsic>(BI)) ++BI;
    if (&*BI != RI)
      return false;
  }

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  /// call.
  SmallVector<CallInst*, 4> TailCalls;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI))
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
    AttributeSet CalleeAttrs = CS.getAttributes();
    if (AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias) !=
        AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias))
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// ExtAddrMode - This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
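/// The matched address decomposes as (illustrative):
///   BaseGV + BaseOffs + BaseReg + Scale*ScaledReg
/// where BaseGV, BaseOffs, HasBaseReg, and Scale come from the base class,
/// and BaseReg/ScaledReg hold the concrete IR values.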
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg;
  Value *ScaledReg;
  ExtAddrMode() : BaseReg(0), ScaledReg(0) {}
  void print(raw_ostream &OS) const;
  void dump() const;

  bool operator==(const ExtAddrMode& O) const {
    return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
           (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
           (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
  }
};

#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}
#endif

void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs)
    OS << (NeedPlus ? " + " : "") << BaseOffs, NeedPlus = true;

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    BaseReg->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    ScaledReg->printAsOperand(OS, /*PrintType=*/false);
  }

  OS << ']';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

/// \brief This class provides transaction based operation on the IR.
/// Every change made through this class is recorded in the internal state and
/// can be undone (rollback) until commit is called.
class TypePromotionTransaction {

  /// \brief This represents the common interface of the individual transaction.
  /// Each class implements the logic for doing one specific modification on
  /// the IR via the TypePromotionTransaction.
  class TypePromotionAction {
  protected:
    /// The Instruction modified.
    Instruction *Inst;

  public:
    /// \brief Constructor of the action.
    /// The constructor performs the related action on the IR.
    TypePromotionAction(Instruction *Inst) : Inst(Inst) {}

    virtual ~TypePromotionAction() {}

    /// \brief Undo the modification done by this action.
    /// When this method is called, the IR must be in the same state as it was
    /// before this action was applied.
    /// \pre Undoing the action works if and only if the IR is in the exact same
    /// state as it was directly after this action was applied.
    virtual void undo() = 0;

    /// \brief Commit every change made by this action.
    /// When the results on the IR of the action are to be kept, it is important
    /// to call this function, otherwise hidden information may be kept forever.
    virtual void commit() {
      // Nothing to be done, this action is not doing anything.
    }
  };

  /// \brief Utility to remember the position of an instruction.
  class InsertionHandler {
    /// Position of an instruction.
    /// Either an instruction:
    /// - Is the first in a basic block: BB is used.
    /// - Has a previous instruction: PrevInst is used.
    union {
      Instruction *PrevInst;
      BasicBlock *BB;
    } Point;
    /// Remember whether or not the instruction had a previous instruction.
    bool HasPrevInstruction;

  public:
    /// \brief Record the position of \p Inst.
    InsertionHandler(Instruction *Inst) {
      BasicBlock::iterator It = Inst;
      HasPrevInstruction = (It != (Inst->getParent()->begin()));
      if (HasPrevInstruction)
        Point.PrevInst = --It;
      else
        Point.BB = Inst->getParent();
    }

    /// \brief Insert \p Inst at the recorded position.
    void insert(Instruction *Inst) {
      if (HasPrevInstruction) {
        if (Inst->getParent())
          Inst->removeFromParent();
        Inst->insertAfter(Point.PrevInst);
      } else {
        Instruction *Position = Point.BB->getFirstInsertionPt();
        if (Inst->getParent())
          Inst->moveBefore(Position);
        else
          Inst->insertBefore(Position);
      }
    }
  };

  /// \brief Move an instruction before another.
  class InstructionMoveBefore : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Position;

  public:
    /// \brief Move \p Inst before \p Before.
    InstructionMoveBefore(Instruction *Inst, Instruction *Before)
        : TypePromotionAction(Inst), Position(Inst) {
      DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
      Inst->moveBefore(Before);
    }

    /// \brief Move the instruction back to its original position.
    void undo() override {
      DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
      Position.insert(Inst);
    }
  };

  /// \brief Set the operand of an instruction with a new value.
  class OperandSetter : public TypePromotionAction {
    /// Original operand of the instruction.
    Value *Origin;
    /// Index of the modified instruction.
    unsigned Idx;

  public:
    /// \brief Set \p Idx operand of \p Inst with \p NewVal.
    OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
        : TypePromotionAction(Inst), Idx(Idx) {
      DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
                   << "for:" << *Inst << "\n"
                   << "with:" << *NewVal << "\n");
      Origin = Inst->getOperand(Idx);
      Inst->setOperand(Idx, NewVal);
    }

    /// \brief Restore the original value of the instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
                   << "for: " << *Inst << "\n"
                   << "with: " << *Origin << "\n");
      Inst->setOperand(Idx, Origin);
    }
  };

  /// \brief Hide the operands of an instruction.
  /// Do as if this instruction was not using any of its operands.
  class OperandsHider : public TypePromotionAction {
    /// The list of original operands.
    SmallVector<Value *, 4> OriginalValues;

  public:
    /// \brief Remove \p Inst from the uses of the operands of \p Inst.
    OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
      unsigned NumOpnds = Inst->getNumOperands();
      OriginalValues.reserve(NumOpnds);
      for (unsigned It = 0; It < NumOpnds; ++It) {
        // Save the current operand.
        Value *Val = Inst->getOperand(It);
        OriginalValues.push_back(Val);
        // Set a dummy one.
        // We could use OperandSetter here, but that would imply an overhead
        // that we are not willing to pay.
        Inst->setOperand(It, UndefValue::get(Val->getType()));
      }
    }

    /// \brief Restore the original list of uses.
    void undo() override {
      DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
      for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
        Inst->setOperand(It, OriginalValues[It]);
    }
  };

  /// \brief Build a truncate instruction.
  class TruncBuilder : public TypePromotionAction {
  public:
    /// \brief Build a truncate instruction of \p Opnd producing a \p Ty
    /// result.
    /// trunc Opnd to Ty.
    TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
      IRBuilder<> Builder(Opnd);
      Inst = cast<Instruction>(Builder.CreateTrunc(Opnd, Ty, "promoted"));
      DEBUG(dbgs() << "Do: TruncBuilder: " << *Inst << "\n");
    }

    /// \brief Get the built instruction.
    Instruction *getBuiltInstruction() { return Inst; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: TruncBuilder: " << *Inst << "\n");
      Inst->eraseFromParent();
    }
  };

  /// \brief Build a sign extension instruction.
  class SExtBuilder : public TypePromotionAction {
  public:
    /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// sext Opnd to Ty.
    SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      // Overwrite Inst with the sext we actually build; seeding the base class
      // with InsertPt avoids reading the member before it is initialized.
      Inst = cast<Instruction>(Builder.CreateSExt(Opnd, Ty, "promoted"));
      DEBUG(dbgs() << "Do: SExtBuilder: " << *Inst << "\n");
    }

    /// \brief Get the built instruction.
    Instruction *getBuiltInstruction() { return Inst; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: SExtBuilder: " << *Inst << "\n");
      Inst->eraseFromParent();
    }
  };

  /// \brief Mutate an instruction to another type.
  class TypeMutator : public TypePromotionAction {
    /// Record the original type.
    Type *OrigTy;

  public:
    /// \brief Mutate the type of \p Inst into \p NewTy.
    TypeMutator(Instruction *Inst, Type *NewTy)
        : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
      DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
                   << "\n");
      Inst->mutateType(NewTy);
    }

    /// \brief Mutate the instruction back to its original type.
    void undo() override {
      DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
                   << "\n");
      Inst->mutateType(OrigTy);
    }
  };

  /// \brief Replace the uses of an instruction by another instruction.
  class UsesReplacer : public TypePromotionAction {
    /// Helper structure to keep track of the replaced uses.
    struct InstructionAndIdx {
      /// The instruction using the instruction.
      Instruction *Inst;
      /// The index where this instruction is used for Inst.
      unsigned Idx;
      InstructionAndIdx(Instruction *Inst, unsigned Idx)
          : Inst(Inst), Idx(Idx) {}
    };

    /// Keep track of the original uses (pair Instruction, Index).
    SmallVector<InstructionAndIdx, 4> OriginalUses;
    typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator;

  public:
    /// \brief Replace all the use of \p Inst by \p New.
    UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
                   << "\n");
      // Record the original uses.
      for (Use &U : Inst->uses()) {
        Instruction *UserI = cast<Instruction>(U.getUser());
        OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
      }
      // Now, we can replace the uses.
      Inst->replaceAllUsesWith(New);
    }

    /// \brief Reassign the original uses of Inst to Inst.
    void undo() override {
      DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
      for (use_iterator UseIt = OriginalUses.begin(),
                        EndIt = OriginalUses.end();
           UseIt != EndIt; ++UseIt) {
        UseIt->Inst->setOperand(UseIt->Idx, Inst);
      }
    }
  };

  /// \brief Remove an instruction from the IR.
  class InstructionRemover : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Inserter;
    /// Helper structure to hide all the links to the instruction. In other
    /// words, this helps to do as if the instruction was removed.
    OperandsHider Hider;
    /// Keep track of the uses replaced, if any.
    UsesReplacer *Replacer;

  public:
    /// \brief Remove all references of \p Inst and optionally replace all its
    /// uses with New.
    /// \pre If !Inst->use_empty(), then New != NULL
    InstructionRemover(Instruction *Inst, Value *New = NULL)
        : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
          Replacer(NULL) {
      if (New)
        Replacer = new UsesReplacer(Inst, New);
      DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
      Inst->removeFromParent();
    }

    ~InstructionRemover() { delete Replacer; }

    /// \brief Really remove the instruction.
    void commit() override { delete Inst; }

    /// \brief Resurrect the instruction and reassign it to the proper uses if
    /// a new value was provided when building this action.
    void undo() override {
      DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
      Inserter.insert(Inst);
      if (Replacer)
        Replacer->undo();
      Hider.undo();
    }
  };

public:
  /// Restoration point.
  /// The restoration point is a pointer to an action instead of an iterator
  /// because the iterator may be invalidated but not the pointer.
  typedef const TypePromotionAction *ConstRestorationPt;
  /// Commit every change made in this transaction.
  void commit();
  /// Undo all the changes made after the given point.
  void rollback(ConstRestorationPt Point);
  /// Get the current restoration point.
  ConstRestorationPt getRestorationPoint() const;

  /// \name API for IR modification with state keeping to support rollback.
  /// @{
  /// Same as Instruction::setOperand.
  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
  /// Same as Instruction::eraseFromParent.
  void eraseInstruction(Instruction *Inst, Value *NewVal = NULL);
  /// Same as Value::replaceAllUsesWith.
  void replaceAllUsesWith(Instruction *Inst, Value *New);
  /// Same as Value::mutateType.
  void mutateType(Instruction *Inst, Type *NewTy);
  /// Same as IRBuilder::createTrunc.
  Instruction *createTrunc(Instruction *Opnd, Type *Ty);
  /// Same as IRBuilder::createSExt.
  Instruction *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// Same as Instruction::moveBefore.
  void moveBefore(Instruction *Inst, Instruction *Before);
  /// @}

  ~TypePromotionTransaction();

private:
  /// The ordered list of actions made so far.
  SmallVector<TypePromotionAction *, 16> Actions;
  typedef SmallVectorImpl<TypePromotionAction *>::iterator CommitPt;
};

void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
                                          Value *NewVal) {
  Actions.push_back(
      new TypePromotionTransaction::OperandSetter(Inst, Idx, NewVal));
}

void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
                                                Value *NewVal) {
  Actions.push_back(
      new TypePromotionTransaction::InstructionRemover(Inst, NewVal));
}

void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
                                                  Value *New) {
  Actions.push_back(new TypePromotionTransaction::UsesReplacer(Inst, New));
}

void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
  Actions.push_back(new TypePromotionTransaction::TypeMutator(Inst, NewTy));
}

Instruction *TypePromotionTransaction::createTrunc(Instruction *Opnd,
                                                   Type *Ty) {
  TruncBuilder *TB = new TruncBuilder(Opnd, Ty);
  Actions.push_back(TB);
  return TB->getBuiltInstruction();
}

Instruction *TypePromotionTransaction::createSExt(Instruction *Inst,
                                                  Value *Opnd, Type *Ty) {
  SExtBuilder *SB = new SExtBuilder(Inst, Opnd, Ty);
  Actions.push_back(SB);
  return SB->getBuiltInstruction();
}

void TypePromotionTransaction::moveBefore(Instruction *Inst,
                                          Instruction *Before) {
  Actions.push_back(
      new TypePromotionTransaction::InstructionMoveBefore(Inst, Before));
}

TypePromotionTransaction::ConstRestorationPt
TypePromotionTransaction::getRestorationPoint() const {
  return Actions.rbegin() != Actions.rend() ? *Actions.rbegin() : NULL;
}

void TypePromotionTransaction::commit() {
  for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt;
       ++It) {
    (*It)->commit();
    delete *It;
  }
  Actions.clear();
}

void TypePromotionTransaction::rollback(
    TypePromotionTransaction::ConstRestorationPt Point) {
  while (!Actions.empty() && Point != (*Actions.rbegin())) {
    TypePromotionAction *Curr = Actions.pop_back_val();
    Curr->undo();
    delete Curr;
  }
}

TypePromotionTransaction::~TypePromotionTransaction() {
  for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; ++It)
    delete *It;
  Actions.clear();
}

/// \brief A helper class for matching addressing modes.
///
/// This encapsulates the logic for matching the target-legal addressing modes.
class AddressingModeMatcher {
  SmallVectorImpl<Instruction*> &AddrModeInsts;
  const TargetLowering &TLI;

  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
  /// the memory instruction that we're computing this address for.
  Type *AccessTy;
  Instruction *MemoryInst;

  /// AddrMode - This is the addressing mode that we're building up.  This is
  /// part of the return value of this addressing mode matching stuff.
  ExtAddrMode &AddrMode;

  /// The truncate instructions inserted by other CodeGenPrepare optimizations.
  const SetOfInstrs &InsertedTruncs;
  /// A map from the instructions to their type before promotion.
  InstrToOrigTy &PromotedInsts;
  /// The ongoing transaction where every action should be registered.
  TypePromotionTransaction &TPT;

  /// IgnoreProfitability - This is set to true when we should not do
  /// profitability checks.  When true, IsProfitableToFoldIntoAddressingMode
  /// always returns true.
  bool IgnoreProfitability;

  AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI,
                        const TargetLowering &T, Type *AT,
                        Instruction *MI, ExtAddrMode &AM,
                        const SetOfInstrs &InsertedTruncs,
                        InstrToOrigTy &PromotedInsts,
                        TypePromotionTransaction &TPT)
      : AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM),
        InsertedTruncs(InsertedTruncs), PromotedInsts(PromotedInsts), TPT(TPT) {
    IgnoreProfitability = false;
  }
public:

  /// Match - Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy.  This returns a list of involved
  /// instructions in AddrModeInsts.
  /// \p InsertedTruncs The truncate instructions inserted by other
  /// CodeGenPrepare optimizations.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p TPT The ongoing transaction where every action should be registered.
  static ExtAddrMode Match(Value *V, Type *AccessTy,
                           Instruction *MemoryInst,
                           SmallVectorImpl<Instruction*> &AddrModeInsts,
                           const TargetLowering &TLI,
                           const SetOfInstrs &InsertedTruncs,
                           InstrToOrigTy &PromotedInsts,
                           TypePromotionTransaction &TPT) {
    ExtAddrMode Result;

    bool Success = AddressingModeMatcher(AddrModeInsts, TLI, AccessTy,
                                         MemoryInst, Result, InsertedTruncs,
                                         PromotedInsts, TPT).MatchAddr(V, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");
    return Result;
  }
private:
  bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
  bool MatchAddr(Value *V, unsigned Depth);
  bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
                          bool *MovedAway = NULL);
  bool IsProfitableToFoldIntoAddressingMode(Instruction *I,
                                            ExtAddrMode &AMBefore,
                                            ExtAddrMode &AMAfter);
  bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
  bool IsPromotionProfitable(unsigned MatchedSize, unsigned SizeWithPromotion,
                             Value *PromotedOperand) const;
};

/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode.  Just process that directly.
  if (Scale == 1)
    return MatchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
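  // For example (illustrative): for "%s = add i32 %x, 4" matched with
  // Scale 2, ScaledReg becomes %x and BaseOffs grows by 8.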
  ConstantInt *CI = 0; Value *AddLHS = 0;
  if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}

/// MightBeFoldableInst - This is a little filter, which returns true if an
/// addressing computation involving I might be folded into a load/store
/// accessing it.  This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

/// \brief Helper class to perform type promotion.
class TypePromotionHelper {
  /// \brief Utility function to check whether or not a sign extension of
  /// \p Inst with \p ConsideredSExtType can be moved through \p Inst by either
  /// using the operands of \p Inst or promoting \p Inst.
  /// In other words, check if:
  /// sext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredSExtType.
  /// #1 Promotion applies:
  /// ConsideredSExtType Inst (sext opnd1 to ConsideredSExtType, ...).
  /// #2 Operand reuses:
  /// sext opnd1 to ConsideredSExtType.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static bool canGetThrough(const Instruction *Inst, Type *ConsideredSExtType,
                            const InstrToOrigTy &PromotedInsts);

  /// \brief Utility function to determine if \p OpIdx should be promoted when
  /// promoting \p Inst.
  static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
    if (isa<SelectInst>(Inst) && OpIdx == 0)
      return false;
    return true;
  }

  /// \brief Utility function to promote the operand of \p SExt when this
  /// operand is a promotable trunc or sext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInsts[out] contains how many non-free instructions have been
  /// created to promote the operand of SExt.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of SExt.
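  /// For instance (illustrative):
  /// @code
  /// %t = trunc i64 %x to i32
  /// %s = sext i32 %t to i64
  /// @endcode
  /// can be rewritten to use %x directly when the trunc only drops bits that
  /// the sext would recreate.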
  static Value *promoteOperandForTruncAndSExt(Instruction *SExt,
                                              TypePromotionTransaction &TPT,
                                              InstrToOrigTy &PromotedInsts,
                                              unsigned &CreatedInsts);

  /// \brief Utility function to promote the operand of \p SExt when this
  /// operand is promotable and is not a supported trunc or sext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInsts[out] contains how many non-free instructions have been
  /// created to promote the operand of SExt.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of SExt.
  static Value *promoteOperandForOther(Instruction *SExt,
                                       TypePromotionTransaction &TPT,
                                       InstrToOrigTy &PromotedInsts,
                                       unsigned &CreatedInsts);

public:
  /// Type for the utility function that promotes the operand of SExt.
  typedef Value *(*Action)(Instruction *SExt, TypePromotionTransaction &TPT,
                           InstrToOrigTy &PromotedInsts,
                           unsigned &CreatedInsts);
  /// \brief Given a sign extend instruction \p SExt, return the appropriate
  /// action to promote the operand of \p SExt instead of using SExt.
  /// \return NULL if no promotable action is possible with the current
  /// sign extension.
  /// \p InsertedTruncs keeps track of all the truncate instructions inserted by
  /// the other CodeGenPrepare optimizations. This information is important
  /// because we do not want to promote these instructions, as CodeGenPrepare
  /// would reinsert them later, creating an infinite create/remove loop.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static Action getAction(Instruction *SExt, const SetOfInstrs &InsertedTruncs,
                          const TargetLowering &TLI,
                          const InstrToOrigTy &PromotedInsts);
};

bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
                                        Type *ConsideredSExtType,
                                        const InstrToOrigTy &PromotedInsts) {
  // We can always get through sext.
  if (isa<SExtInst>(Inst))
    return true;

  // We can get through a binary operator, if it is legal. In other words, the
  // binary operator must have a nuw or nsw flag.
  const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
  if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
      (BinOp->hasNoUnsignedWrap() || BinOp->hasNoSignedWrap()))
    return true;

  // Check if we can do the following simplification.
  // sext(trunc(sext)) --> sext
  if (!isa<TruncInst>(Inst))
    return false;

  Value *OpndVal = Inst->getOperand(0);
  // Check if we can use this operand in the sext.
  // If the type is larger than the result type of the sign extension,
  // we cannot.
  if (OpndVal->getType()->getIntegerBitWidth() >
      ConsideredSExtType->getIntegerBitWidth())
    return false;

  // If the operand of the truncate is not an instruction, we will not have
  // any information on the dropped bits.
  // (Actually we could for constants but it is not worth the extra logic).
  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
  if (!Opnd)
    return false;

  // Check if the source of the type is narrow enough.
  // I.e., check that trunc just drops sign extended bits.
  // #1 get the type of the operand.
1590  const Type *OpndType;
1591  InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
1592  if (It != PromotedInsts.end())
1593    OpndType = It->second;
1594  else if (isa<SExtInst>(Opnd))
1595    OpndType = cast<Instruction>(Opnd)->getOperand(0)->getType();
1596  else
1597    return false;
1598
1599  // #2 check that the truncate just drops sign extended bits.
1600  if (Inst->getType()->getIntegerBitWidth() >= OpndType->getIntegerBitWidth())
1601    return true;
1602
1603  return false;
1604}
1605
1606TypePromotionHelper::Action TypePromotionHelper::getAction(
1607    Instruction *SExt, const SetOfInstrs &InsertedTruncs,
1608    const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
1609  Instruction *SExtOpnd = dyn_cast<Instruction>(SExt->getOperand(0));
1610  Type *SExtTy = SExt->getType();
1611  // If the operand of the sign extension is not an instruction, we cannot
1612  // get through.
1613  // If it is, check that we can get through it.
1614  if (!SExtOpnd || !canGetThrough(SExtOpnd, SExtTy, PromotedInsts))
1615    return NULL;
1616
1617  // Do not promote if the operand has been added by codegenprepare.
1618  // Otherwise, it means we are undoing an optimization that is likely to be
1619  // redone, thus causing a potential infinite loop.
1620  if (isa<TruncInst>(SExtOpnd) && InsertedTruncs.count(SExtOpnd))
1621    return NULL;
1622
1623  // SExt or Trunc instructions.
1624  // Return the related handler.
1625  if (isa<SExtInst>(SExtOpnd) || isa<TruncInst>(SExtOpnd))
1626    return promoteOperandForTruncAndSExt;
1627
1628  // Regular instruction.
1629  // Abort early if we will have to insert non-free instructions.
1630  if (!SExtOpnd->hasOneUse() &&
1631      !TLI.isTruncateFree(SExtTy, SExtOpnd->getType()))
1632    return NULL;
1633  return promoteOperandForOther;
1634}
1635
1636Value *TypePromotionHelper::promoteOperandForTruncAndSExt(
1637    llvm::Instruction *SExt, TypePromotionTransaction &TPT,
1638    InstrToOrigTy &PromotedInsts, unsigned &CreatedInsts) {
1639  // By construction, the operand of SExt is an instruction. Otherwise we cannot
1640  // get through it and this method should not be called.
1641  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
1642  // Replace sext(trunc(opnd)) or sext(sext(opnd))
1643  // => sext(opnd).
1644  TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
1645  CreatedInsts = 0;
1646
1647  // Remove dead code.
1648  if (SExtOpnd->use_empty())
1649    TPT.eraseInstruction(SExtOpnd);
1650
1651  // Check if the sext is still needed.
1652  if (SExt->getType() != SExt->getOperand(0)->getType())
1653    return SExt;
1654
1655  // At this point we have: sext ty opnd to ty.
1656  // Reassign the uses of SExt to the opnd and remove SExt.
1657  Value *NextVal = SExt->getOperand(0);
1658  TPT.eraseInstruction(SExt, NextVal);
1659  return NextVal;
1660}
1661
1662Value *
1663TypePromotionHelper::promoteOperandForOther(Instruction *SExt,
1664                                            TypePromotionTransaction &TPT,
1665                                            InstrToOrigTy &PromotedInsts,
1666                                            unsigned &CreatedInsts) {
1667  // By construction, the operand of SExt is an instruction. Otherwise we cannot
1668  // get through it and this method should not be called.
1669  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
1670  CreatedInsts = 0;
1671  if (!SExtOpnd->hasOneUse()) {
1672    // SExtOpnd will be promoted.
1673    // All its uses, except SExt, will need to use a truncated value of the
1674    // promoted version.
1675    // Create the truncate now.
1676    Instruction *Trunc = TPT.createTrunc(SExt, SExtOpnd->getType());
1677    Trunc->removeFromParent();
1678    // Insert it just after the definition.
1679    Trunc->insertAfter(SExtOpnd);
1680
1681    TPT.replaceAllUsesWith(SExtOpnd, Trunc);
1682    // Restore the operand of SExt (which has been replaced by the previous call
1683    // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
1684    TPT.setOperand(SExt, 0, SExtOpnd);
1685  }
1686
1687  // Get through the Instruction:
1688  // 1. Update its type.
1689  // 2. Replace the uses of SExt by Inst.
1690  // 3. Sign extend each operand that needs to be sign extended.
1691
1692  // Remember the original type of the instruction before promotion.
1693  // This is useful to know that the high bits are sign extended bits.
1694  PromotedInsts.insert(
1695      std::pair<Instruction *, Type *>(SExtOpnd, SExtOpnd->getType()));
1696  // Step #1.
1697  TPT.mutateType(SExtOpnd, SExt->getType());
1698  // Step #2.
1699  TPT.replaceAllUsesWith(SExt, SExtOpnd);
1700  // Step #3.
1701  Instruction *SExtForOpnd = SExt;
1702
1703  DEBUG(dbgs() << "Propagate SExt to operands\n");
1704  for (int OpIdx = 0, EndOpIdx = SExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
1705       ++OpIdx) {
1706    DEBUG(dbgs() << "Operand:\n" << *(SExtOpnd->getOperand(OpIdx)) << '\n');
1707    if (SExtOpnd->getOperand(OpIdx)->getType() == SExt->getType() ||
1708        !shouldSExtOperand(SExtOpnd, OpIdx)) {
1709      DEBUG(dbgs() << "No need to propagate\n");
1710      continue;
1711    }
1712    // Check if we can statically sign extend the operand.
1713    Value *Opnd = SExtOpnd->getOperand(OpIdx);
1714    if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
1715      DEBUG(dbgs() << "Statically sign extend\n");
1716      TPT.setOperand(
1717          SExtOpnd, OpIdx,
1718          ConstantInt::getSigned(SExt->getType(), Cst->getSExtValue()));
1719      continue;
1720    }
1721    // UndefValues are typed, so we have to statically sign extend them.
1722    if (isa<UndefValue>(Opnd)) {
1723      DEBUG(dbgs() << "Statically sign extend\n");
1724      TPT.setOperand(SExtOpnd, OpIdx, UndefValue::get(SExt->getType()));
1725      continue;
1726    }
1727
1728    // Otherwise we have to explicitly sign extend the operand.
1729    // Check if SExt was reused to sign extend an operand.
1730    if (!SExtForOpnd) {
1731      // If yes, create a new one.
1732      DEBUG(dbgs() << "More operands to sext\n");
1733      SExtForOpnd = TPT.createSExt(SExt, Opnd, SExt->getType());
1734      ++CreatedInsts;
1735    }
1736
1737    TPT.setOperand(SExtForOpnd, 0, Opnd);
1738
1739    // Move the sign extension before the insertion point.
1740    TPT.moveBefore(SExtForOpnd, SExtOpnd);
1741    TPT.setOperand(SExtOpnd, OpIdx, SExtForOpnd);
1742    // If more sexts are required, new instructions will have to be created.
1743    SExtForOpnd = NULL;
1744  }
1745  if (SExtForOpnd == SExt) {
1746    DEBUG(dbgs() << "Sign extension is useless now\n");
1747    TPT.eraseInstruction(SExt);
1748  }
1749  return SExtOpnd;
1750}
1751
1752/// IsPromotionProfitable - Check whether or not promoting an instruction
1753/// to a wider type was profitable.
1754/// \p MatchedSize gives the number of instructions that have been matched
1755/// in the addressing mode after the promotion was applied.
1756/// \p SizeWithPromotion gives the number of created instructions for
1757/// the promotion plus the number of instructions that have been
1758/// matched in the addressing mode before the promotion.
1759/// \p PromotedOperand is the value that has been promoted.
1760/// \return True if the promotion is profitable, false otherwise.
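/// Illustrative reading (numbers invented, not from the source): if the
/// promotion created one new sext but allowed two additional instructions
/// to be matched into the addressing mode, then MatchedSize exceeds
/// SizeWithPromotion and the promotion is considered profitable.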
1761bool
1762AddressingModeMatcher::IsPromotionProfitable(unsigned MatchedSize,
1763                                             unsigned SizeWithPromotion,
1764                                             Value *PromotedOperand) const {
1765  // We folded fewer instructions than we created to promote the operand.
1766  // This is not profitable.
1767  if (MatchedSize < SizeWithPromotion)
1768    return false;
1769  if (MatchedSize > SizeWithPromotion)
1770    return true;
1771  // The promotion is neutral but it may help to fold the sign extension into
1772  // loads, for instance.
1773  // Check that we did not create an illegal instruction.
1774  Instruction *PromotedInst = dyn_cast<Instruction>(PromotedOperand);
1775  if (!PromotedInst)
1776    return false;
1777  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
1778  // If the ISDOpcode is undefined, it was undefined before the promotion.
1779  if (!ISDOpcode)
1780    return true;
1781  // Otherwise, check if the promoted instruction is legal or not.
1782  return TLI.isOperationLegalOrCustom(ISDOpcode,
1783                                      EVT::getEVT(PromotedInst->getType()));
1784}
1785
1786/// MatchOperationAddr - Given an instruction or constant expr, see if we can
1787/// fold the operation into the addressing mode. If so, update the addressing
1788/// mode and return true, otherwise return false without modifying AddrMode.
1789/// If \p MovedAway is not NULL, it indicates whether or not AddrInst has to be
1790/// folded into the addressing mode on success.
1791/// If \p MovedAway == true, \p AddrInst will not be part of the addressing mode
1792/// because it has been moved away.
1793/// Thus AddrInst must not be added to the matched instructions.
1794/// This state can happen when AddrInst is a sext, since it may be moved away.
1795/// Therefore, AddrInst may not be valid when MovedAway is true and it must
1796/// not be referenced anymore.
1797bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
1798                                               unsigned Depth,
1799                                               bool *MovedAway) {
1800  // Avoid exponential behavior on extremely deep expression trees.
1801  if (Depth >= 5) return false;
1802
1803  // By default, all matched instructions stay in place.
1804  if (MovedAway)
1805    *MovedAway = false;
1806
1807  switch (Opcode) {
1808  case Instruction::PtrToInt:
1809    // PtrToInt is always a noop, as we know that the int type is pointer sized.
1810    return MatchAddr(AddrInst->getOperand(0), Depth);
1811  case Instruction::IntToPtr:
1812    // This inttoptr is a no-op if the integer type is pointer sized.
1813    if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
1814        TLI.getPointerTy(AddrInst->getType()->getPointerAddressSpace()))
1815      return MatchAddr(AddrInst->getOperand(0), Depth);
1816    return false;
1817  case Instruction::BitCast:
1818    // BitCast is always a noop, and we can handle it as long as it is
1819    // int->int or pointer->pointer (we don't want int<->fp or something).
1820    if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
1821         AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
1822        // Don't touch identity bitcasts. These were probably put here by LSR,
1823        // and we don't want to mess around with them. Assume it knows what it
1824        // is doing.
1825        AddrInst->getOperand(0)->getType() != AddrInst->getType())
1826      return MatchAddr(AddrInst->getOperand(0), Depth);
1827    return false;
1828  case Instruction::Add: {
1829    // Check to see if we can merge in the RHS then the LHS. If so, we win.
1830    ExtAddrMode BackupAddrMode = AddrMode;
1831    unsigned OldSize = AddrModeInsts.size();
1832    // Start a transaction at this point.
1833    // The LHS may match but not the RHS.
1834    // Therefore, we need a higher level restoration point to undo a partially
1835    // matched operation.
1836    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
1837        TPT.getRestorationPoint();
1838
1839    if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
1840        MatchAddr(AddrInst->getOperand(0), Depth+1))
1841      return true;
1842
1843    // Restore the old addr mode info.
1844    AddrMode = BackupAddrMode;
1845    AddrModeInsts.resize(OldSize);
1846    TPT.rollback(LastKnownGood);
1847
1848    // Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
1849    if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
1850        MatchAddr(AddrInst->getOperand(1), Depth+1))
1851      return true;
1852
1853    // Otherwise we definitely can't merge the ADD in.
1854    AddrMode = BackupAddrMode;
1855    AddrModeInsts.resize(OldSize);
1856    TPT.rollback(LastKnownGood);
1857    break;
1858  }
1859  //case Instruction::Or:
1860  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
1861  //break;
1862  case Instruction::Mul:
1863  case Instruction::Shl: {
1864    // Can only handle X*C and X << C.
1865    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
1866    if (!RHS) return false;
1867    int64_t Scale = RHS->getSExtValue();
1868    if (Opcode == Instruction::Shl)
1869      Scale = 1LL << Scale;
1870
1871    return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
1872  }
1873  case Instruction::GetElementPtr: {
1874    // Scan the GEP. We check whether it contains constant offsets and at most
1875    // one variable offset.
1876    int VariableOperand = -1;
1877    unsigned VariableScale = 0;
1878
1879    int64_t ConstantOffset = 0;
1880    const DataLayout *TD = TLI.getDataLayout();
1881    gep_type_iterator GTI = gep_type_begin(AddrInst);
1882    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
1883      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1884        const StructLayout *SL = TD->getStructLayout(STy);
1885        unsigned Idx =
1886            cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
1887        ConstantOffset += SL->getElementOffset(Idx);
1888      } else {
1889        uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType());
1890        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
1891          ConstantOffset += CI->getSExtValue()*TypeSize;
1892        } else if (TypeSize) {  // Scales of zero don't do anything.
1893          // We only allow one variable index at the moment.
1894          if (VariableOperand != -1)
1895            return false;
1896
1897          // Remember the variable index.
1898          VariableOperand = i;
1899          VariableScale = TypeSize;
1900        }
1901      }
1902    }
1903
1904    // A common case is for the GEP to only do a constant offset. In this case,
1905    // just add it to the disp field and check validity.
1906    if (VariableOperand == -1) {
1907      AddrMode.BaseOffs += ConstantOffset;
1908      if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
1909        // Check to see if we can fold the base pointer in too.
1910        if (MatchAddr(AddrInst->getOperand(0), Depth+1))
1911          return true;
1912      }
1913      AddrMode.BaseOffs -= ConstantOffset;
1914      return false;
1915    }
1916
1917    // Save the valid addressing mode in case we can't match.
1918    ExtAddrMode BackupAddrMode = AddrMode;
1919    unsigned OldSize = AddrModeInsts.size();
1920
1921    // See if the scale and offset amounts are valid for this target.
1922    AddrMode.BaseOffs += ConstantOffset;
1923
1924    // Match the base operand of the GEP.
1925    if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) {
1926      // If it couldn't be matched, just stuff the value in a register.
1927      if (AddrMode.HasBaseReg) {
1928        AddrMode = BackupAddrMode;
1929        AddrModeInsts.resize(OldSize);
1930        return false;
1931      }
1932      AddrMode.HasBaseReg = true;
1933      AddrMode.BaseReg = AddrInst->getOperand(0);
1934    }
1935
1936    // Match the remaining variable portion of the GEP.
1937    if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
1938                          Depth)) {
1939      // If it couldn't be matched, try stuffing the base into a register
1940      // instead of matching it, and retrying the match of the scale.
1941      AddrMode = BackupAddrMode;
1942      AddrModeInsts.resize(OldSize);
1943      if (AddrMode.HasBaseReg)
1944        return false;
1945      AddrMode.HasBaseReg = true;
1946      AddrMode.BaseReg = AddrInst->getOperand(0);
1947      AddrMode.BaseOffs += ConstantOffset;
1948      if (!MatchScaledValue(AddrInst->getOperand(VariableOperand),
1949                            VariableScale, Depth)) {
1950        // If even that didn't work, bail.
1951        AddrMode = BackupAddrMode;
1952        AddrModeInsts.resize(OldSize);
1953        return false;
1954      }
1955    }
1956
1957    return true;
1958  }
1959  case Instruction::SExt: {
1960    // Try to move this sext out of the way of the addressing mode.
1961    Instruction *SExt = cast<Instruction>(AddrInst);
1962    // Ask for a method for doing so.
1963    TypePromotionHelper::Action TPH = TypePromotionHelper::getAction(
1964        SExt, InsertedTruncs, TLI, PromotedInsts);
1965    if (!TPH)
1966      return false;
1967
1968    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
1969        TPT.getRestorationPoint();
1970    unsigned CreatedInsts = 0;
1971    Value *PromotedOperand = TPH(SExt, TPT, PromotedInsts, CreatedInsts);
1972    // SExt has been moved away.
1973    // Thus either it will be rematched later in the recursive calls or it is
1974    // gone. Anyway, we must not fold it into the addressing mode at this point.
1975    // E.g.,
1976    //   op = add opnd, 1
1977    //   idx = sext op
1978    //   addr = gep base, idx
1979    // is now:
1980    //   promotedOpnd = sext opnd            <- no match here
1981    //   op = promoted_add promotedOpnd, 1   <- match (later in recursive calls)
1982    //   addr = gep base, op                 <- match
1983    if (MovedAway)
1984      *MovedAway = true;
1985
1986    assert(PromotedOperand &&
1987           "TypePromotionHelper should have filtered out those cases");
1988
1989    ExtAddrMode BackupAddrMode = AddrMode;
1990    unsigned OldSize = AddrModeInsts.size();
1991
1992    if (!MatchAddr(PromotedOperand, Depth) ||
1993        !IsPromotionProfitable(AddrModeInsts.size(), OldSize + CreatedInsts,
1994                               PromotedOperand)) {
1995      AddrMode = BackupAddrMode;
1996      AddrModeInsts.resize(OldSize);
1997      DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
1998      TPT.rollback(LastKnownGood);
1999      return false;
2000    }
2001    return true;
2002  }
2003  }
2004  return false;
2005}
2006
2007/// MatchAddr - If we can, try to add the value of 'Addr' into the current
2008/// addressing mode. If Addr can't be added to AddrMode this returns false and
2009/// leaves AddrMode unmodified. This assumes that Addr is either a pointer type
2010/// or intptr_t for the target.
2011///
2012bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
2013  // Start a transaction at this point that we will roll back if the matching
2014  // fails.
2015  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
2016      TPT.getRestorationPoint();
2017  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
2018    // Fold in immediates if legal for the target.
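    // (Illustrative, invented case: an address computed as %p + 42 reaches
    // this point with Addr being the constant 42; it is absorbed into
    // BaseOffs below if the target accepts the resulting [reg+imm] mode.)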
2019    AddrMode.BaseOffs += CI->getSExtValue();
2020    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
2021      return true;
2022    AddrMode.BaseOffs -= CI->getSExtValue();
2023  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
2024    // If this is a global variable, try to fold it into the addressing mode.
2025    if (AddrMode.BaseGV == 0) {
2026      AddrMode.BaseGV = GV;
2027      if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
2028        return true;
2029      AddrMode.BaseGV = 0;
2030    }
2031  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
2032    ExtAddrMode BackupAddrMode = AddrMode;
2033    unsigned OldSize = AddrModeInsts.size();
2034
2035    // Check to see if it is possible to fold this operation.
2036    bool MovedAway = false;
2037    if (MatchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
2038      // This instruction may have been moved away. If so, there is nothing
2039      // to check here.
2040      if (MovedAway)
2041        return true;
2042      // Okay, it's possible to fold this. Check to see if it is actually
2043      // *profitable* to do so. We use a simple cost model to avoid increasing
2044      // register pressure too much.
2045      if (I->hasOneUse() ||
2046          IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
2047        AddrModeInsts.push_back(I);
2048        return true;
2049      }
2050
2051      // It isn't profitable to do this, roll back.
2052      //cerr << "NOT FOLDING: " << *I;
2053      AddrMode = BackupAddrMode;
2054      AddrModeInsts.resize(OldSize);
2055      TPT.rollback(LastKnownGood);
2056    }
2057  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
2058    if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
2059      return true;
2060    TPT.rollback(LastKnownGood);
2061  } else if (isa<ConstantPointerNull>(Addr)) {
2062    // Null pointer gets folded without affecting the addressing mode.
2063    return true;
2064  }
2065
2066  // Worst case, the target should support [reg] addressing modes. :)
2067  if (!AddrMode.HasBaseReg) {
2068    AddrMode.HasBaseReg = true;
2069    AddrMode.BaseReg = Addr;
2070    // Still check for legality in case the target supports [imm] but not [i+r].
2071    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
2072      return true;
2073    AddrMode.HasBaseReg = false;
2074    AddrMode.BaseReg = 0;
2075  }
2076
2077  // If the base register is already taken, see if we can do [r+r].
2078  if (AddrMode.Scale == 0) {
2079    AddrMode.Scale = 1;
2080    AddrMode.ScaledReg = Addr;
2081    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
2082      return true;
2083    AddrMode.Scale = 0;
2084    AddrMode.ScaledReg = 0;
2085  }
2086  // Couldn't match.
2087  TPT.rollback(LastKnownGood);
2088  return false;
2089}
2090
2091/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
2092/// inline asm call are due to memory operands. If so, return true, otherwise
2093/// return false.
2094static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
2095                                    const TargetLowering &TLI) {
2096  TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(ImmutableCallSite(CI));
2097  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
2098    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
2099
2100    // Compute the constraint code and ConstraintType to use.
2101    TLI.ComputeConstraintToUse(OpInfo, SDValue());
2102
2103    // If this asm operand is our Value*, and if it isn't an indirect memory
2104    // operand, we can't fold it!
2105    if (OpInfo.CallOperandVal == OpVal &&
2106        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
2107         !OpInfo.isIndirect))
2108      return false;
2109  }
2110
2111  return true;
2112}
2113
2114/// FindAllMemoryUses - Recursively walk all the uses of I until we find a
2115/// memory use. If we find an obviously non-foldable instruction, return true.
2116/// Add the ultimately found memory instructions to MemoryUses.
2117static bool FindAllMemoryUses(Instruction *I,
2118                SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
2119                              SmallPtrSet<Instruction*, 16> &ConsideredInsts,
2120                              const TargetLowering &TLI) {
2121  // If we already considered this instruction, we're done.
2122  if (!ConsideredInsts.insert(I))
2123    return false;
2124
2125  // If this is an obviously unfoldable instruction, bail out.
2126  if (!MightBeFoldableInst(I))
2127    return true;
2128
2129  // Loop over all the uses, recursively processing them.
2130  for (Use &U : I->uses()) {
2131    Instruction *UserI = cast<Instruction>(U.getUser());
2132
2133    if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
2134      MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
2135      continue;
2136    }
2137
2138    if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
2139      unsigned opNo = U.getOperandNo();
2140      if (opNo == 0) return true; // Storing addr, not into addr.
2141      MemoryUses.push_back(std::make_pair(SI, opNo));
2142      continue;
2143    }
2144
2145    if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
2146      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
2147      if (!IA) return true;
2148
2149      // If this is a memory operand, we're cool, otherwise bail out.
2150      if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
2151        return true;
2152      continue;
2153    }
2154
2155    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI))
2156      return true;
2157  }
2158
2159  return false;
2160}
2161
2162/// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at
2163/// the use site that we're folding it into. If so, there is no cost to
2164/// include it in the addressing mode. KnownLive1 and KnownLive2 are two values
2165/// that we know are live at the instruction already.
2166bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
2167                                                   Value *KnownLive2) {
2168  // If Val is either of the known-live values, we know it is live!
2169  if (Val == 0 || Val == KnownLive1 || Val == KnownLive2)
2170    return true;
2171
2172  // All values other than instructions and arguments (e.g. constants) are live.
2173  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;
2174
2175  // If Val is a constant sized alloca in the entry block, it is live; this is
2176  // true because it is just a reference to the stack/frame pointer, which is
2177  // live for the whole function.
2178  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
2179    if (AI->isStaticAlloca())
2180      return true;
2181
2182  // Check to see if this value is already used in the memory instruction's
2183  // block. If so, it's already live into the block at the very least, so we
2184  // can reasonably fold it.
2185  return Val->isUsedInBasicBlock(MemoryInst->getParent());
2186}
2187
2188/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
2189/// mode of the machine to fold the specified instruction into a load or store
2190/// that ultimately uses it. However, the specified instruction has multiple
2191/// uses. Given this, it may actually increase register pressure to fold it
2192/// into the load. For example, consider this code:
2193///
2194/// X = ...
2195/// Y = X+1
2196/// use(Y)   -> nonload/store
2197/// Z = Y+1
2198/// load Z
2199///
2200/// In this case, Y has multiple uses, and can be folded into the load of Z
2201/// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
2202/// be live at the use(Y) line. If we don't fold Y into load Z, we use one
2203/// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
2204/// number of computations either.
2205///
2206/// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
2207/// X was live across 'load Z' for other reasons, we actually *would* want to
2208/// fold the addressing mode in the Z case. This would make Y die earlier.
2209bool AddressingModeMatcher::
2210IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
2211                                     ExtAddrMode &AMAfter) {
2212  if (IgnoreProfitability) return true;
2213
2214  // AMBefore is the addressing mode before this instruction was folded into it,
2215  // and AMAfter is the addressing mode after the instruction was folded. Get
2216  // the set of registers referenced by AMAfter and subtract out those
2217  // referenced by AMBefore: this is the set of values which folding in this
2218  // address extends the lifetime of.
2219  //
2220  // Note that there are only two potential values being referenced here,
2221  // BaseReg and ScaleReg (global addresses are always available, as are any
2222  // folded immediates).
2223  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
2224
2225  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
2226  // lifetime wasn't extended by adding this instruction.
2227  if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
2228    BaseReg = 0;
2229  if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
2230    ScaledReg = 0;
2231
2232  // If folding this instruction (and its subexprs) didn't extend any live
2233  // ranges, we're ok with it.
2234  if (BaseReg == 0 && ScaledReg == 0)
2235    return true;
2236
2237  // If all uses of this instruction are ultimately load/store/inlineasm's,
2238  // check to see if their addressing modes will include this instruction. If
2239  // so, we can fold it into all uses, so it doesn't matter if it has multiple
2240  // uses.
2241  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
2242  SmallPtrSet<Instruction*, 16> ConsideredInsts;
2243  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
2244    return false;  // Has a non-memory, non-foldable use!
2245
2246  // Now that we know that all uses of this instruction are part of a chain of
2247  // computation involving only operations that could theoretically be folded
2248  // into a memory use, loop over each of these uses and see if they could
2249  // *actually* fold the instruction.
2250  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
2251  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
2252    Instruction *User = MemoryUses[i].first;
2253    unsigned OpNo = MemoryUses[i].second;
2254
2255    // Get the access type of this use. If the use isn't a pointer, we don't
2256    // know what it accesses.
2257    Value *Address = User->getOperand(OpNo);
2258    if (!Address->getType()->isPointerTy())
2259      return false;
2260    Type *AddressAccessTy = Address->getType()->getPointerElementType();
2261
2262    // Do a match against the root of this address, ignoring profitability. This
2263    // will tell us if the addressing mode for the memory operation will
2264    // *actually* cover the shared instruction.
2265    ExtAddrMode Result;
2266    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
2267        TPT.getRestorationPoint();
2268    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
2269                                  MemoryInst, Result, InsertedTruncs,
2270                                  PromotedInsts, TPT);
2271    Matcher.IgnoreProfitability = true;
2272    bool Success = Matcher.MatchAddr(Address, 0);
2273    (void)Success; assert(Success && "Couldn't select *anything*?");
2274
2275    // The match was only to check profitability; the changes made are not
2276    // part of the original matcher. Therefore, they should be dropped,
2277    // otherwise the original matcher will not be in the right state.
2278    TPT.rollback(LastKnownGood);
2279
2280    // If the match didn't cover I, then it won't be shared by it.
2281    if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
2282                  I) == MatchedAddrModeInsts.end())
2283      return false;
2284
2285    MatchedAddrModeInsts.clear();
2286  }
2287
2288  return true;
2289}
2290
2291} // end anonymous namespace
2292
2293/// IsNonLocalValue - Return true if the specified value is defined in a
2294/// different basic block than BB.
2295static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
2296  if (Instruction *I = dyn_cast<Instruction>(V))
2297    return I->getParent() != BB;
2298  return false;
2299}
2300
2301/// OptimizeMemoryInst - Load and Store Instructions often have
2302/// addressing modes that can do significant amounts of computation. As such,
2303/// instruction selection will try to get the load or store to do as much
2304/// computation as possible for the program. The problem is that isel can only
2305/// see within a single block. As such, we sink as much legal addressing mode
2306/// stuff into the block as possible.
2307///
2308/// This method is used to optimize both load/store and inline asms with memory
2309/// operands.
2310bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
2311                                        Type *AccessTy) {
2312  Value *Repl = Addr;
2313
2314  // Try to collapse single-value PHI nodes. This is necessary to undo
2315  // unprofitable PRE transformations.
2316  SmallVector<Value*, 8> worklist;
2317  SmallPtrSet<Value*, 16> Visited;
2318  worklist.push_back(Addr);
2319
2320  // Use a worklist to iteratively look through PHI nodes, and ensure that
2321  // the addressing modes obtained from the non-PHI roots of the graph
2322  // are equivalent.
2323  Value *Consensus = 0;
2324  unsigned NumUsesConsensus = 0;
2325  bool IsNumUsesConsensusValid = false;
2326  SmallVector<Instruction*, 16> AddrModeInsts;
2327  ExtAddrMode AddrMode;
2328  TypePromotionTransaction TPT;
2329  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
2330      TPT.getRestorationPoint();
2331  while (!worklist.empty()) {
2332    Value *V = worklist.back();
2333    worklist.pop_back();
2334
2335    // Break use-def graph loops.
2336    if (!Visited.insert(V)) {
2337      Consensus = 0;
2338      break;
2339    }
2340
2341    // For a PHI node, push all of its incoming values.
2342    if (PHINode *P = dyn_cast<PHINode>(V)) {
2343      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
2344        worklist.push_back(P->getIncomingValue(i));
2345      continue;
2346    }
2347
2348    // For non-PHIs, determine the addressing mode being computed.
2349    SmallVector<Instruction*, 16> NewAddrModeInsts;
2350    ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
2351        V, AccessTy, MemoryInst, NewAddrModeInsts, *TLI, InsertedTruncsSet,
2352        PromotedInsts, TPT);
2353
2354    // This check is broken into two cases with very similar code to avoid using
2355    // getNumUses() as much as possible. Some values have a lot of uses, so
2356    // calling getNumUses() unconditionally caused a significant compile-time
2357    // regression.
2358    if (!Consensus) {
2359      Consensus = V;
2360      AddrMode = NewAddrMode;
2361      AddrModeInsts = NewAddrModeInsts;
2362      continue;
2363    } else if (NewAddrMode == AddrMode) {
2364      if (!IsNumUsesConsensusValid) {
2365        NumUsesConsensus = Consensus->getNumUses();
2366        IsNumUsesConsensusValid = true;
2367      }

2368
2369      // Ensure that the obtained addressing mode is equivalent to that obtained
2370      // for all other roots of the PHI traversal. Also, when choosing one
2371      // such root as representative, select the one with the most uses in order
2372      // to keep the cost modeling heuristics in AddressingModeMatcher
2373      // applicable.
2374      unsigned NumUses = V->getNumUses();
2375      if (NumUses > NumUsesConsensus) {
2376        Consensus = V;
2377        NumUsesConsensus = NumUses;
2378        AddrModeInsts = NewAddrModeInsts;
2379      }
2380      continue;
2381    }
2382
2383    Consensus = 0;
2384    break;
2385  }
2386
2387  // If the addressing mode couldn't be determined, or if multiple different
2388  // ones were determined, bail out now.
2389  if (!Consensus) {
2390    TPT.rollback(LastKnownGood);
2391    return false;
2392  }
2393  TPT.commit();
2394
2395  // Check to see if any of the instructions subsumed by this addr mode are
2396  // non-local to I's BB.
2397  bool AnyNonLocal = false;
2398  for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
2399    if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
2400      AnyNonLocal = true;
2401      break;
2402    }
2403  }
2404
2405  // If all the instructions matched are already in this BB, don't do anything.
2406  if (!AnyNonLocal) {
2407    DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
2408    return false;
2409  }
2410
2411  // Insert this computation right after this user. Since our caller is
2412  // scanning from the top of the BB to the bottom, reuses of the expr are
2413  // guaranteed to happen later.
2414  IRBuilder<> Builder(MemoryInst);
2415
2416  // Now that we've determined the addressing expression we want to use and
2417  // know that we have to sink it into this block, check to see if we have
2418  // already done this for some other load/store instr in this block. If so,
2419  // reuse the computation.
2420  Value *&SunkAddr = SunkAddrs[Addr];
2421  if (SunkAddr) {
2422    DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
2423                 << *MemoryInst);
2424    if (SunkAddr->getType() != Addr->getType())
2425      SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
2426  } else {
2427    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
2428                 << *MemoryInst);
2429    Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType());
2430    Value *Result = 0;
2431
2432    // Start with the base register. Do this first so that subsequent address
2433    // matching finds it last, which will prevent it from trying to match it
2434    // as the scaled value in case it happens to be a mul. That would be
2435    // problematic if we've sunk a different mul for the scale, because then
2436    // we'd end up sinking both muls.
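    // Illustrative output (names invented): for AddrMode = %base + 4*%idx + 40
    // with a 64-bit intptr_t, the IR built below is roughly:
    //   %sunkaddr = ptrtoint i8* %base to i64
    //   %sunkaddr1 = mul i64 %idx, 4
    //   %sunkaddr2 = add i64 %sunkaddr, %sunkaddr1
    //   %sunkaddr3 = add i64 %sunkaddr2, 40
    //   %sunkaddr4 = inttoptr i64 %sunkaddr3 to i8*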
2437    if (AddrMode.BaseReg) {
2438      Value *V = AddrMode.BaseReg;
2439      if (V->getType()->isPointerTy())
2440        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
2441      if (V->getType() != IntPtrTy)
2442        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
2443      Result = V;
2444    }
2445
2446    // Add the scale value.
2447    if (AddrMode.Scale) {
2448      Value *V = AddrMode.ScaledReg;
2449      if (V->getType() == IntPtrTy) {
2450        // done.
2451      } else if (V->getType()->isPointerTy()) {
2452        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
2453      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
2454                 cast<IntegerType>(V->getType())->getBitWidth()) {
2455        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
2456      } else {
2457        // It is only safe to sign extend the BaseReg if we know that the math
2458        // required to create it did not overflow before we extend it. Since
2459        // the original IR value was tossed in favor of a constant back when
2460        // the AddrMode was created we need to bail out gracefully if widths
2461        // do not match instead of extending it.
2462        if (Result != AddrMode.BaseReg)
2463          cast<Instruction>(Result)->eraseFromParent();
2464        return false;
2465      }
2466      if (AddrMode.Scale != 1)
2467        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
2468                              "sunkaddr");
2469      if (Result)
2470        Result = Builder.CreateAdd(Result, V, "sunkaddr");
2471      else
2472        Result = V;
2473    }
2474
2475    // Add in the BaseGV if present.
2476    if (AddrMode.BaseGV) {
2477      Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
2478      if (Result)
2479        Result = Builder.CreateAdd(Result, V, "sunkaddr");
2480      else
2481        Result = V;
2482    }
2483
2484    // Add in the Base Offset if present.
2485    if (AddrMode.BaseOffs) {
2486      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
2487      if (Result)
2488        Result = Builder.CreateAdd(Result, V, "sunkaddr");
2489      else
2490        Result = V;
2491    }
2492
2493    if (Result == 0)
2494      SunkAddr = Constant::getNullValue(Addr->getType());
2495    else
2496      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
2497  }
2498
2499  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
2500
2501  // If we have no uses, recursively delete the value and all dead instructions
2502  // using it.
2503  if (Repl->use_empty()) {
2504    // This can cause recursive deletion, which can invalidate our iterator.
2505    // Use a WeakVH to hold onto it in case this happens.
2506    WeakVH IterHandle(CurInstIterator);
2507    BasicBlock *BB = CurInstIterator->getParent();
2508
2509    RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);
2510
2511    if (IterHandle != CurInstIterator) {
2512      // If the iterator instruction was recursively deleted, start over at the
2513      // start of the block.
2514      CurInstIterator = BB->begin();
2515      SunkAddrs.clear();
2516    }
2517  }
2518  ++NumMemoryInsts;
2519  return true;
2520}
2521
2522/// OptimizeInlineAsmInst - If there are any memory operands, use
2523/// OptimizeMemoryInst to sink their address computations into the block when
2524/// possible / profitable.
2525bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
2526  bool MadeChange = false;
2527
2528  TargetLowering::AsmOperandInfoVector
2529    TargetConstraints = TLI->ParseConstraints(CS);
2530  unsigned ArgNo = 0;
2531  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
2532    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
2533
2534    // Compute the constraint code and ConstraintType to use.
2535    TLI->ComputeConstraintToUse(OpInfo, SDValue());
2536
2537    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
2538        OpInfo.isIndirect) {
2539      Value *OpVal = CS->getArgOperand(ArgNo++);
2540      MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType());
2541    } else if (OpInfo.Type == InlineAsm::isInput)
2542      ArgNo++;
2543  }
2544
2545  return MadeChange;
2546}
2547
2548/// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
2549/// basic block as the load, unless conditions are unfavorable. This allows
2550/// SelectionDAG to fold the extend into the load.
2551///
2552bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
2553  // Look for a load being extended.
2554  LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
2555  if (!LI) return false;
2556
2557  // If they're already in the same block, there's nothing to do.
2558  if (LI->getParent() == I->getParent())
2559    return false;
2560
2561  // If the load has other users and the truncate is not free, this probably
2562  // isn't worthwhile.
2563  if (!LI->hasOneUse() &&
2564      TLI && (TLI->isTypeLegal(TLI->getValueType(LI->getType())) ||
2565              !TLI->isTypeLegal(TLI->getValueType(I->getType()))) &&
2566      !TLI->isTruncateFree(I->getType(), LI->getType()))
2567    return false;
2568
2569  // Check whether the target supports casts folded into loads.
2570  unsigned LType;
2571  if (isa<ZExtInst>(I))
2572    LType = ISD::ZEXTLOAD;
2573  else {
2574    assert(isa<SExtInst>(I) && "Unexpected ext type!");
2575    LType = ISD::SEXTLOAD;
2576  }
2577  if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
2578    return false;
2579
2580  // Move the extend into the same block as the load, so that SelectionDAG
2581  // can fold it.
2582  I->removeFromParent();
2583  I->insertAfter(LI);
2584  ++NumExtsMoved;
2585  return true;
2586}
2587
2588bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
2589  BasicBlock *DefBB = I->getParent();
2590
2591  // If the result of a {s|z}ext and its source are both live out, rewrite all
2592  // other uses of the source with the result of the extension.
2593  Value *Src = I->getOperand(0);
2594  if (Src->hasOneUse())
2595    return false;
2596
2597  // Only do this xform if truncating is free.
2598  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
2599    return false;
2600
2601  // Only safe to perform the optimization if the source is also defined in
2602  // this block.
2603  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
2604    return false;
2605
2606  bool DefIsLiveOut = false;
2607  for (User *U : I->users()) {
2608    Instruction *UI = cast<Instruction>(U);
2609
2610    // Figure out which BB this ext is used in.
2611    BasicBlock *UserBB = UI->getParent();
2612    if (UserBB == DefBB) continue;
2613    DefIsLiveOut = true;
2614    break;
2615  }
2616  if (!DefIsLiveOut)
2617    return false;
2618
2619  // Make sure none of the uses are PHI nodes.
2620  for (User *U : Src->users()) {
2621    Instruction *UI = cast<Instruction>(U);
2622    BasicBlock *UserBB = UI->getParent();
2623    if (UserBB == DefBB) continue;
2624    // Be conservative. We don't want this xform to end up introducing
2625    // reloads just before load / store instructions.
2626    if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
2627      return false;
2628  }
2629
2630  // InsertedTruncs - Only insert one trunc in each block once.
2631  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;
2632
2633  bool MadeChange = false;
2634  for (Use &U : Src->uses()) {
2635    Instruction *User = cast<Instruction>(U.getUser());
2636
2637    // Figure out which BB this ext is used in.
2638    BasicBlock *UserBB = User->getParent();
2639    if (UserBB == DefBB) continue;
2640
2641    // Both src and def are live in this block. Rewrite the use.
2642    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
2643
2644    if (!InsertedTrunc) {
2645      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
2646      InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
2647      InsertedTruncsSet.insert(InsertedTrunc);
2648    }
2649
2650    // Replace a use of the {s|z}ext source with a use of the result.
2651    U = InsertedTrunc;
2652    ++NumExtUses;
2653    MadeChange = true;
2654  }
2655
2656  return MadeChange;
2657}
2658
2659/// isFormingBranchFromSelectProfitable - Returns true if a SelectInst should be
2660/// turned into an explicit branch.
2661static bool isFormingBranchFromSelectProfitable(SelectInst *SI) {
2662  // FIXME: This should use the same heuristics as IfConversion to determine
2663  // whether a select is better represented as a branch. This requires that
2664  // branch probability metadata is preserved for the select, which is not the
2665  // case currently.
2666
2667  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2668
2669  // If the branch is predicted right, an out-of-order CPU can avoid blocking on
2670  // the compare. Emit cmovs on compares with a memory operand as branches to
2671  // avoid stalls on the load from memory. If the compare has more than one use
2672  // there's probably another cmov or setcc around so it's not worth emitting a
2673  // branch.
2674  if (!Cmp)
2675    return false;
2676
2677  Value *CmpOp0 = Cmp->getOperand(0);
2678  Value *CmpOp1 = Cmp->getOperand(1);
2679
2680  // We check that the memory operand has one use to avoid uses of the loaded
2681  // value directly after the compare, making branches unprofitable.
2682  return Cmp->hasOneUse() &&
2683         ((isa<LoadInst>(CmpOp0) && CmpOp0->hasOneUse()) ||
2684          (isa<LoadInst>(CmpOp1) && CmpOp1->hasOneUse()));
2685}
2686
2687
2688/// If we have a SelectInst that will likely profit from branch prediction,
2689/// turn it into a branch.
2690bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) {
2691  bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
2692
2693  // Can we convert the 'select' to CF?
2694  if (DisableSelectToBranch || OptSize || !TLI || VectorCond)
2695    return false;
2696
2697  TargetLowering::SelectSupportKind SelectKind;
2698  if (VectorCond)
2699    SelectKind = TargetLowering::VectorMaskSelect;
2700  else if (SI->getType()->isVectorTy())
2701    SelectKind = TargetLowering::ScalarCondVectorVal;
2702  else
2703    SelectKind = TargetLowering::ScalarValSelect;
2704
2705  // Do we have efficient codegen support for this kind of 'select'?
2706  if (TLI->isSelectSupported(SelectKind)) {
2707    // We have efficient codegen support for the select instruction.
2708    // Check if it is profitable to keep this 'select'.
2709    if (!TLI->isPredictableSelectExpensive() ||
2710        !isFormingBranchFromSelectProfitable(SI))
2711      return false;
2712  }
2713
2714  ModifiedDT = true;
2715
2716  // First, we split the block containing the select into 2 blocks.
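  // Illustrative CFG after the rewrite (value names invented; the block names
  // select.mid and select.end are the ones created below):
  //   start:
  //     ...
  //     br i1 %cond, label %select.end, label %select.mid
  //   select.mid:
  //     br label %select.end
  //   select.end:
  //     %sel = phi [ %trueval, %start ], [ %falseval, %select.mid ]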
2717  BasicBlock *StartBlock = SI->getParent();
2718  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI));
2719  BasicBlock *NextBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");
2720
2721  // Create a new block serving as the landing pad for the branch.
2722  BasicBlock *SmallBlock = BasicBlock::Create(SI->getContext(), "select.mid",
2723                                              NextBlock->getParent(), NextBlock);
2724
2725  // Move the unconditional branch from the block with the select in it into our
2726  // landing pad block.
2727  StartBlock->getTerminator()->eraseFromParent();
2728  BranchInst::Create(NextBlock, SmallBlock);
2729
2730  // Insert the real conditional branch based on the original condition.
2731  BranchInst::Create(NextBlock, SmallBlock, SI->getCondition(), SI);
2732
2733  // The select itself is replaced with a PHI Node.
2734  PHINode *PN = PHINode::Create(SI->getType(), 2, "", NextBlock->begin());
2735  PN->takeName(SI);
2736  PN->addIncoming(SI->getTrueValue(), StartBlock);
2737  PN->addIncoming(SI->getFalseValue(), SmallBlock);
2738  SI->replaceAllUsesWith(PN);
2739  SI->eraseFromParent();
2740
2741  // Instruct OptimizeBlock to skip to the next block.
2742  CurInstIterator = StartBlock->end();
2743  ++NumSelectsExpanded;
2744  return true;
2745}
2746
2747static bool isBroadcastShuffle(ShuffleVectorInst *SVI) {
2748  SmallVector<int, 16> Mask(SVI->getShuffleMask());
2749  int SplatElem = -1;
2750  for (unsigned i = 0; i < Mask.size(); ++i) {
2751    if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem)
2752      return false;
2753    SplatElem = Mask[i];
2754  }
2755
2756  return true;
2757}
2758
2759/// Some targets have expensive vector shifts if the lanes aren't all the same
2760/// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
2761/// it's often worth sinking a shufflevector splat down to its use so that
2762/// codegen can spot all lanes are identical.
2763bool CodeGenPrepare::OptimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
2764  BasicBlock *DefBB = SVI->getParent();
2765
2766  // Only do this xform if variable vector shifts are particularly expensive.
2767  if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
2768    return false;
2769
2770  // We only expect better codegen by sinking a shuffle if we can recognise a
2771  // constant splat.
2772  if (!isBroadcastShuffle(SVI))
2773    return false;
2774
2775  // InsertedShuffles - Only insert a shuffle in each block once.
2776  DenseMap<BasicBlock*, Instruction*> InsertedShuffles;
2777
2778  bool MadeChange = false;
2779  for (User *U : SVI->users()) {
2780    Instruction *UI = cast<Instruction>(U);
2781
2782    // Figure out which BB this shuffle is used in.
2783    BasicBlock *UserBB = UI->getParent();
2784    if (UserBB == DefBB) continue;
2785
2786    // For now only apply this when the splat is used by a shift instruction.
2787    if (!UI->isShift()) continue;
2788
2789    // Everything checks out, sink the shuffle if the user's block doesn't
2790    // already have a copy.
2791    Instruction *&InsertedShuffle = InsertedShuffles[UserBB];
2792
2793    if (!InsertedShuffle) {
2794      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
2795      InsertedShuffle = new ShuffleVectorInst(SVI->getOperand(0),
2796                                              SVI->getOperand(1),
2797                                              SVI->getOperand(2), "", InsertPt);
2798    }
2799
2800    UI->replaceUsesOfWith(SVI, InsertedShuffle);
2801    MadeChange = true;
2802  }
2803
2804  // If we removed all uses, nuke the shuffle.
2805  if (SVI->use_empty()) {
2806    SVI->eraseFromParent();
2807    MadeChange = true;
2808  }
2809
2810  return MadeChange;
2811}
2812
2813bool CodeGenPrepare::OptimizeInst(Instruction *I) {
2814  if (PHINode *P = dyn_cast<PHINode>(I)) {
2815    // It is possible for very late stage optimizations (such as SimplifyCFG)
2816    // to introduce PHI nodes too late to be cleaned up. If we detect such a
2817    // trivial PHI, go ahead and zap it here.
2818    if (Value *V = SimplifyInstruction(P, TLI ? TLI->getDataLayout() : 0,
2819                                       TLInfo, DT)) {
2820      P->replaceAllUsesWith(V);
2821      P->eraseFromParent();
2822      ++NumPHIsElim;
2823      return true;
2824    }
2825    return false;
2826  }
2827
2828  if (CastInst *CI = dyn_cast<CastInst>(I)) {
2829    // If the source of the cast is a constant, then this should have
2830    // already been constant folded. The only reason NOT to constant fold
2831    // it is if something (e.g. LSR) was careful to place the constant
2832    // evaluation in a block other than the one that uses it (e.g. to hoist
2833    // the address of globals out of a loop). If this is the case, we don't
2834    // want to forward-subst the cast.
2835    if (isa<Constant>(CI->getOperand(0)))
2836      return false;
2837
2838    if (TLI && OptimizeNoopCopyExpression(CI, *TLI))
2839      return true;
2840
2841    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
2842      // Sink a zext or sext into its user blocks if the target type doesn't
2843      // fit in one register.
2844      if (TLI && TLI->getTypeAction(CI->getContext(),
2845                                    TLI->getValueType(CI->getType())) ==
2846                     TargetLowering::TypeExpandInteger) {
2847        return SinkCast(CI);
2848      } else {
2849        bool MadeChange = MoveExtToFormExtLoad(I);
2850        return MadeChange | OptimizeExtUses(I);
2851      }
2852    }
2853    return false;
2854  }
2855
2856  if (CmpInst *CI = dyn_cast<CmpInst>(I))
2857    if (!TLI || !TLI->hasMultipleConditionRegisters())
2858      return OptimizeCmpExpression(CI);
2859
2860  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
2861    if (TLI)
2862      return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
2863    return false;
2864  }
2865
2866  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
2867    if (TLI)
2868      return OptimizeMemoryInst(I, SI->getOperand(1),
2869                                SI->getOperand(0)->getType());
2870    return false;
2871  }
2872
2873  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
2874    if (GEPI->hasAllZeroIndices()) {
2875      // The GEP operand must be a pointer, so must its result -> BitCast.
2876      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
2877                                        GEPI->getName(), GEPI);
2878      GEPI->replaceAllUsesWith(NC);
2879      GEPI->eraseFromParent();
2880      ++NumGEPsElim;
2881      OptimizeInst(NC);
2882      return true;
2883    }
2884    return false;
2885  }
2886
2887  if (CallInst *CI = dyn_cast<CallInst>(I))
2888    return OptimizeCallInst(CI);
2889
2890  if (SelectInst *SI = dyn_cast<SelectInst>(I))
2891    return OptimizeSelectInst(SI);
2892
2893  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
2894    return OptimizeShuffleVectorInst(SVI);
2895
2896  return false;
2897}
2898
2899// In this pass we look for GEP and cast instructions that are used
2900// across basic blocks and rewrite them to improve basic-block-at-a-time
2901// selection.
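// Illustrative example (IR invented, not from the source): given
//   bb1:
//     %c = bitcast i8* %p to i32*
//     br label %bb2
//   bb2:
//     %v = load i32* %c
// a copy of the cast is sunk into %bb2 so that selection, working one block
// at a time, can fold it into the load.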
2902bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
2903  SunkAddrs.clear();
2904  bool MadeChange = false;
2905
2906  CurInstIterator = BB.begin();
2907  while (CurInstIterator != BB.end())
2908    MadeChange |= OptimizeInst(CurInstIterator++);
2909
2910  MadeChange |= DupRetToEnableTailCallOpts(&BB);
2911
2912  return MadeChange;
2913}
2914
2915// If llvm.dbg.value is far away from the value, then ISel may not be able to
2916// handle it properly. ISel will drop llvm.dbg.value if it cannot
2917// find a node corresponding to the value.
2918bool CodeGenPrepare::PlaceDbgValues(Function &F) {
2919  bool MadeChange = false;
2920  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
2921    Instruction *PrevNonDbgInst = NULL;
2922    for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) {
2923      Instruction *Insn = BI; ++BI;
2924      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
2925      if (!DVI) {
2926        PrevNonDbgInst = Insn;
2927        continue;
2928      }
2929
2930      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
2931      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
2932        DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
2933        DVI->removeFromParent();
2934        if (isa<PHINode>(VI))
2935          DVI->insertBefore(VI->getParent()->getFirstInsertionPt());
2936        else
2937          DVI->insertAfter(VI);
2938        MadeChange = true;
2939        ++NumDbgValueMoved;
2940      }
2941    }
2942  }
2943  return MadeChange;
2944}
2945
2946// If there is a sequence that branches based on comparing a single bit
2947// against zero that can be combined into a single instruction, and the
2948// target supports folding these into a single instruction, sink the
2949// mask and compare into the branch uses. Do this before OptimizeBlock ->
2950// OptimizeInst -> OptimizeCmpExpression, which perturbs the pattern being
2951// searched for.
2952bool CodeGenPrepare::sinkAndCmp(Function &F) {
2953  if (!EnableAndCmpSinking)
2954    return false;
2955  if (!TLI || !TLI->isMaskAndBranchFoldingLegal())
2956    return false;
2957  bool MadeChange = false;
2958  for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
2959    BasicBlock *BB = I++;
2960
2961    // Does this BB end with the following?
2962    //   %andVal = and %val, #single-bit-set
2963    //   %icmpVal = icmp %andVal, 0
2964    //   br i1 %icmpVal, label %dest1, label %dest2
2965    BranchInst *Brcc = dyn_cast<BranchInst>(BB->getTerminator());
2966    if (!Brcc || !Brcc->isConditional())
2967      continue;
2968    ICmpInst *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0));
2969    if (!Cmp || Cmp->getParent() != BB)
2970      continue;
2971    ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1));
2972    if (!Zero || !Zero->isZero())
2973      continue;
2974    Instruction *And = dyn_cast<Instruction>(Cmp->getOperand(0));
2975    if (!And || And->getOpcode() != Instruction::And || And->getParent() != BB)
2976      continue;
2977    ConstantInt* Mask = dyn_cast<ConstantInt>(And->getOperand(1));
2978    if (!Mask || !Mask->getUniqueInteger().isPowerOf2())
2979      continue;
2980    DEBUG(dbgs() << "found and; icmp ?,0; brcc\n"); DEBUG(BB->dump());
2981
2982    // Push the "and; icmp" for any users that are conditional branches.
2983    // Since there can only be one branch use per BB, we don't need to keep
2984    // track of which BBs we insert into.
2985    for (Value::use_iterator UI = Cmp->use_begin(), E = Cmp->use_end();
2986         UI != E; ) {
2987      Use &TheUse = *UI;
2988      // Find brcc use.
2989      BranchInst *BrccUser = dyn_cast<BranchInst>(*UI);
2990      ++UI;
2991      if (!BrccUser || !BrccUser->isConditional())
2992        continue;
2993      BasicBlock *UserBB = BrccUser->getParent();
2994      if (UserBB == BB) continue;
2995      DEBUG(dbgs() << "found Brcc use\n");
2996
2997      // Sink the "and; icmp" to the use.
2998      MadeChange = true;
2999      BinaryOperator *NewAnd =
3000          BinaryOperator::CreateAnd(And->getOperand(0), And->getOperand(1), "",
3001                                    BrccUser);
3002      CmpInst *NewCmp =
3003          CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), NewAnd, Zero,
3004                          "", BrccUser);
3005      TheUse = NewCmp;
3006      ++NumAndCmpsMoved;
3007      DEBUG(BrccUser->getParent()->dump());
3008    }
3009  }
3010  return MadeChange;
3011}
3012
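// Illustrative effect of sinkAndCmp above (IR names invented): given
//   bb:
//     %andVal = and i32 %val, 4
//     %icmpVal = icmp eq i32 %andVal, 0
//     br i1 %icmpVal, label %dest1, label %dest2
// and another block whose conditional branch also uses %icmpVal, a fresh
// "and; icmp" pair is recreated right before that branch, so a target with
// legal mask-and-branch folding sees the whole pattern in one block.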