Local.cpp revision a18c5748989d0b2889d076a2951be17ce61d4f69
//===-- Local.cpp - Functions to perform local transformations ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions perform various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/DIBuilder.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

//===----------------------------------------------------------------------===//
//  Local constant propagation.
//

/// ConstantFoldTerminator - If a terminator instruction is predicated on a
/// constant value, convert it into an unconditional branch to the constant
/// destination.  This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses that this might make dead if
/// DeleteDeadConditions is true.
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
                                  const TargetLibraryInfo *TLI) {
  TerminatorInst *T = BB->getTerminator();
  IRBuilder<> Builder(T);

  // Branch - See if we are conditional jumping on constant
  if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
    if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
    BasicBlock *Dest1 = BI->getSuccessor(0);
    BasicBlock *Dest2 = BI->getSuccessor(1);

    if (ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on constant?
      // YES.  Change to unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest     = Cond->getZExtValue() ? Dest2 : Dest1;

      //cerr << "Function: " << T->getParent()->getParent()
      //     << "\nRemoving branch from " << T->getParent()
      //     << "\n\nTo: " << OldDest << endl;

      // Let the basic block know that we are letting go of it.  Based on this,
      // it will adjust its PHI nodes.
      OldDest->removePredecessor(BB);

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Destination);
      BI->eraseFromParent();
      return true;
    }

    if (Dest2 == Dest1) {       // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Dest1);
      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }
    return false;
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(T)) {
    // If we are switching on a constant, we can convert the switch into a
    // single branch instruction!
    ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition());
    BasicBlock *TheOnlyDest = SI->getDefaultDest();
    BasicBlock *DefaultDest = TheOnlyDest;

    // Figure out which case it goes to.
    for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
         i != e; ++i) {
      // Found case matching a constant operand?
      if (i.getCaseValue() == CI) {
        TheOnlyDest = i.getCaseSuccessor();
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest.  If so, eliminate it as an explicit compare.
      if (i.getCaseSuccessor() == DefaultDest) {
        MDNode* MD = SI->getMetadata(LLVMContext::MD_prof);
        // MD should have 2 + NumCases operands.
        if (MD && MD->getNumOperands() == 2 + SI->getNumCases()) {
          // Collect branch weights into a vector.
          SmallVector<uint32_t, 8> Weights;
          for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
               ++MD_i) {
            ConstantInt* CI = dyn_cast<ConstantInt>(MD->getOperand(MD_i));
            assert(CI);
            Weights.push_back(CI->getValue().getZExtValue());
          }
          // Merge weight of this case to the default weight.
          unsigned idx = i.getCaseIndex();
          Weights[0] += Weights[idx+1];
          // Remove weight for this case.
          std::swap(Weights[idx+1], Weights.back());
          Weights.pop_back();
          SI->setMetadata(LLVMContext::MD_prof,
                          MDBuilder(BB->getContext()).
                          createBranchWeights(Weights));
        }
        // Remove this entry.
        DefaultDest->removePredecessor(SI->getParent());
        SI->removeCase(i);
        --i; --e;
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two non-equal
      // destinations.
      if (i.getCaseSuccessor() != TheOnlyDest) TheOnlyDest = 0;
    }

    if (CI && !TheOnlyDest) {
      // Branching on a constant, but not any of the cases, go to the default
      // successor.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);
      BasicBlock *BB = SI->getParent();

      // Remove entries from PHI nodes which we no longer branch to...
      for (unsigned i = 0, e = SI->getNumSuccessors(); i != e; ++i) {
        // Found case matching a constant operand?
        BasicBlock *Succ = SI->getSuccessor(i);
        if (Succ == TheOnlyDest)
          TheOnlyDest = 0;  // Don't modify the first branch to TheOnlyDest
        else
          Succ->removePredecessor(BB);
      }

      // Delete the old switch.
      Value *Cond = SI->getCondition();
      SI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }

    if (SI->getNumCases() == 1) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
      SwitchInst::CaseIt FirstCase = SI->case_begin();
      IntegersSubset& Case = FirstCase.getCaseValueEx();
      if (Case.isSingleNumber()) {
        // FIXME: Currently work with ConstantInt based numbers.
        Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
                                           Case.getSingleNumber(0).toConstantInt(),
                                           "cond");

        // Insert the new branch.
        BranchInst *NewBr = Builder.CreateCondBr(Cond,
                                                 FirstCase.getCaseSuccessor(),
                                                 SI->getDefaultDest());
        MDNode* MD = SI->getMetadata(LLVMContext::MD_prof);
        if (MD && MD->getNumOperands() == 3) {
          ConstantInt *SICase = dyn_cast<ConstantInt>(MD->getOperand(2));
          ConstantInt *SIDef = dyn_cast<ConstantInt>(MD->getOperand(1));
          assert(SICase && SIDef);
          // The TrueWeight should be the weight for the single case of SI.
          NewBr->setMetadata(LLVMContext::MD_prof,
                             MDBuilder(BB->getContext()).
                             createBranchWeights(SICase->getValue().getZExtValue(),
                                                 SIDef->getValue().getZExtValue()));
        }

        // Delete the old switch.
        SI->eraseFromParent();
        return true;
      }
    }
    return false;
  }

  if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (BlockAddress *BA =
          dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);

      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        if (IBI->getDestination(i) == TheOnlyDest)
          TheOnlyDest = 0;
        else
          IBI->getDestination(i)->removePredecessor(IBI->getParent());
      }
      Value *Address = IBI->getAddress();
      IBI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior.  Replace the unconditional branch with an
      // 'unreachable' instruction.
      if (TheOnlyDest) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      return true;
    }
  }

  return false;
}

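// Illustrative sketch, not part of the original file: a caller that has just
// rewritten a branch or switch condition to a constant can immediately
// collapse the terminator with the routine above and clean up the now-dead
// condition.  The helper name is hypothetical.
static bool foldTerminatorAfterConstantCondition(BasicBlock *BB,
                                                 const TargetLibraryInfo *TLI) {
  // Only rewrites conditional branches, switches and indirectbrs whose
  // condition/address is now a constant; returns false otherwise.
  return ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true, TLI);
}
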
//===----------------------------------------------------------------------===//
//  Local dead code elimination.
//

/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
bool llvm::isInstructionTriviallyDead(Instruction *I,
                                      const TargetLibraryInfo *TLI) {
  if (!I->use_empty() || isa<TerminatorInst>(I)) return false;

  // We don't want the landingpad instruction removed by anything this general.
  if (isa<LandingPadInst>(I))
    return false;

  // We don't want debug info removed by anything this general, unless
  // debug info is empty.
  if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
    if (DDI->getAddress())
      return false;
    return true;
  }
  if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
    if (DVI->getValue())
      return false;
    return true;
  }

  if (!I->mayHaveSideEffects()) return true;

  // Special case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Safe to delete llvm.stacksave if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave)
      return true;

    // Lifetime intrinsics are dead when their right-hand is undef.
    if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
        II->getIntrinsicID() == Intrinsic::lifetime_end)
      return isa<UndefValue>(II->getArgOperand(1));
  }

  if (isAllocLikeFn(I, TLI)) return true;

  if (CallInst *CI = isFreeCall(I, TLI))
    if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
      return C->isNullValue() || isa<UndefValue>(C);

  return false;
}

/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it.  If that makes any of its operands
/// trivially dead, delete them too, recursively.  Return true if any
/// instructions were deleted.
bool
llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
                                                 const TargetLibraryInfo *TLI) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
    return false;

  SmallVector<Instruction*, 16> DeadInsts;
  DeadInsts.push_back(I);

  do {
    I = DeadInsts.pop_back_val();

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, 0);

      if (!OpV->use_empty()) continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          DeadInsts.push_back(OpI);
    }

    I->eraseFromParent();
  } while (!DeadInsts.empty());

  return true;
}

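// Illustrative sketch, not part of the original file: the common pattern for
// using the routine above after rewriting an instruction's uses: once the
// last use is gone, the instruction and any operand chain that fed only it
// can be deleted in one call.  The helper name is hypothetical.
static void replaceAndRecursivelyErase(Instruction *I, Value *NewV,
                                       const TargetLibraryInfo *TLI) {
  I->replaceAllUsesWith(NewV);
  // I now has no uses; this also cleans up operands made dead by its removal.
  RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
}
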
/// areAllUsesEqual - Check whether the uses of a value are all the same.
/// This is similar to Instruction::hasOneUse() except this will also return
/// true when there are no uses or multiple uses that all refer to the same
/// value.
static bool areAllUsesEqual(Instruction *I) {
  Value::use_iterator UI = I->use_begin();
  Value::use_iterator UE = I->use_end();
  if (UI == UE)
    return true;

  User *TheUse = *UI;
  for (++UI; UI != UE; ++UI) {
    if (*UI != TheUse)
      return false;
  }
  return true;
}

/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it.  If that makes any of its operands trivially dead, delete them
/// too, recursively.  Return true if a change was made.
bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
                                        const TargetLibraryInfo *TLI) {
  SmallPtrSet<Instruction*, 4> Visited;
  for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
       I = cast<Instruction>(*I->use_begin())) {
    if (I->use_empty())
      return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);

    // If we find an instruction more than once, we're on a cycle that
    // won't prove fruitful.
    if (!Visited.insert(I)) {
      // Break the cycle and delete the instruction and its operands.
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
      return true;
    }
  }
  return false;
}

/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well as in this block.
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD,
                                       const TargetLibraryInfo *TLI) {
  bool MadeChange = false;

#ifndef NDEBUG
  // In debug builds, ensure that the terminator of the block is never replaced
  // or deleted by these simplifications. The idea of simplification is that it
  // cannot introduce new instructions, and there is no way to replace the
  // terminator of a block without introducing a new instruction.
  AssertingVH<Instruction> TerminatorVH(--BB->end());
#endif

  for (BasicBlock::iterator BI = BB->begin(), E = --BB->end(); BI != E; ) {
    assert(!BI->isTerminator());
    Instruction *Inst = BI++;

    WeakVH BIHandle(BI);
    if (recursivelySimplifyInstruction(Inst, TD)) {
      MadeChange = true;
      if (BIHandle != BI)
        BI = BB->begin();
      continue;
    }

    MadeChange |= RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
    if (BIHandle != BI)
      BI = BB->begin();
  }
  return MadeChange;
}

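// Illustrative sketch, not part of the original file: applying the per-block
// simplification above to every block in a function.  The helper name is
// hypothetical.
static bool simplifyAllBlocks(Function &F, const DataLayout *TD,
                              const TargetLibraryInfo *TLI) {
  bool Changed = false;
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    Changed |= SimplifyInstructionsInBlock(&*BB, TD, TLI);
  return Changed;
}
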
//===----------------------------------------------------------------------===//
//  Control Flow Graph Restructuring.
//


/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
/// method is called when we're about to delete Pred as a predecessor of BB.  If
/// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
///
/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
/// nodes that collapse into identity values.  For example, if we have:
///   x = phi(1, 0, 0, 0)
///   y = and x, z
///
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the and to 0.
void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
                                        DataLayout *TD) {
  // This only adjusts blocks with PHI nodes.
  if (!isa<PHINode>(BB->begin()))
    return;

  // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
  // them down.  This will leave us with single entry phi nodes and other phis
  // that can be removed.
  BB->removePredecessor(Pred, true);

  WeakVH PhiIt = &BB->front();
  while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
    PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
    Value *OldPhiIt = PhiIt;

    if (!recursivelySimplifyInstruction(PN, TD))
      continue;

    // If recursive simplification ended up deleting the next PHI node we would
    // iterate to, then our iterator is invalid, restart scanning from the top
    // of the block.
    if (PhiIt != OldPhiIt) PhiIt = &BB->front();
  }
}


/// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
/// predecessor is known to have one successor (DestBB!).  Eliminate the edge
/// between them, moving the instructions in the predecessor into DestBB and
/// deleting the predecessor block.
///
void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, Pass *P) {
  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace self-referencing PHI with undef, it must be dead.
    if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  // Zap anything that took the address of DestBB.  Not doing this will give the
  // address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
      ConstantInt::get(llvm::Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  // Splice all the instructions from PredBB to DestBB.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());

  if (P) {
    DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>();
    if (DT) {
      BasicBlock *PredBBIDom = DT->getNode(PredBB)->getIDom()->getBlock();
      DT->changeImmediateDominator(DestBB, PredBBIDom);
      DT->eraseNode(PredBB);
    }
    ProfileInfo *PI = P->getAnalysisIfAvailable<ProfileInfo>();
    if (PI) {
      PI->replaceAllUses(PredBB, DestBB);
      PI->removeEdge(ProfileInfo::getEdge(PredBB, DestBB));
    }
  }
  // Nuke PredBB.
  PredBB->eraseFromParent();
}

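// Illustrative sketch, not part of the original file: one conservative way a
// caller might establish the preconditions of MergeBasicBlockIntoOnlyPred
// before invoking it.  The helper name is hypothetical, and the entry-block
// check simply keeps the sketch on the safe side.
static bool mergeTrivialPredecessor(BasicBlock *BB, Pass *P) {
  BasicBlock *Pred = BB->getSinglePredecessor();
  if (!Pred || Pred == BB || Pred == &BB->getParent()->getEntryBlock())
    return false;
  // The predecessor must branch unconditionally to BB and nowhere else.
  if (Pred->getTerminator()->getNumSuccessors() != 1)
    return false;
  MergeBasicBlockIntoOnlyPred(BB, P);
  return true;
}
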
/// CanMergeValues - Return true if we can choose one of these values to use
/// in place of the other. Note that we will always choose the non-undef
/// value to keep.
static bool CanMergeValues(Value *First, Value *Second) {
  return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
}

/// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
/// almost-empty BB ending in an unconditional branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
///
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
               << Succ->getName() << "\n");
  // Shortcut, if there is only a single predecessor it must be BB and merging
  // is always safe
  if (Succ->getSinglePredecessor()) return true;

  // Make a list of the predecessors of BB
  SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
                            PN->getIncomingValue(PI))) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                       << Succ->getName() << " is conflicting with "
                       << BBPN->getName() << " with regard to common predecessor "
                       << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(Val, PN->getIncomingValue(PI))) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                       << Succ->getName() << " is conflicting with regard to common "
                       << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}

typedef SmallVector<BasicBlock *, 16> PredBlockVector;
typedef DenseMap<BasicBlock *, Value *> IncomingValueMap;

/// \brief Determines the value to use as the phi node input for a block.
///
/// Select between \p OldVal and any value that we know flows from \p BB
/// to a particular phi on the basis of which one (if either) is not
/// undef. Update IncomingValues based on the selected value.
///
/// \param OldVal The value we are considering selecting.
/// \param BB The block that the value flows in from.
/// \param IncomingValues A map from block-to-value for other phi inputs
/// that we have examined.
///
/// \returns the selected value.
static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
                                          IncomingValueMap &IncomingValues) {
  if (!isa<UndefValue>(OldVal)) {
    assert((!IncomingValues.count(BB) ||
            IncomingValues.find(BB)->second == OldVal) &&
           "Expected OldVal to match incoming value from BB!");

    IncomingValues.insert(std::make_pair(BB, OldVal));
    return OldVal;
  }

  IncomingValueMap::const_iterator It = IncomingValues.find(BB);
  if (It != IncomingValues.end()) return It->second;

  return OldVal;
}

/// \brief Create a map from block to value for the operands of a
/// given phi.
///
/// Create a map from block to value for each non-undef value flowing
/// into \p PN.
///
/// \param PN The phi we are collecting the map for.
/// \param IncomingValues [out] The map from block to value for this phi.
static void gatherIncomingValuesToPhi(PHINode *PN,
                                      IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *BB = PN->getIncomingBlock(i);
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V))
      IncomingValues.insert(std::make_pair(BB, V));
  }
}

/// \brief Replace the incoming undef values to a phi with the values
/// from a block-to-value map.
///
/// \param PN The phi we are replacing the undefs in.
/// \param IncomingValues A map from block to value.
static void replaceUndefValuesInPhi(PHINode *PN,
                                    const IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V)) continue;

    BasicBlock *BB = PN->getIncomingBlock(i);
    IncomingValueMap::const_iterator It = IncomingValues.find(BB);
    if (It == IncomingValues.end()) continue;

    PN->setIncomingValue(i, It->second);
  }
}

/// \brief Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN) {
  Value *OldVal = PN->removeIncomingValue(BB, false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly.  Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.

  gatherIncomingValuesToPhi(PN, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches. This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  } else {
    for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      BasicBlock *PredBB = BBPreds[i];
      Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  }

  replaceUndefValuesInPhi(PN, IncomingValues);
}

/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
/// unconditional branch, and contains no instructions other than PHI nodes,
/// potential side-effect free intrinsics and the branch.  If possible,
/// eliminate BB by rewriting all the predecessors to branch to the successor
/// block and return true.  If we can't transform, return false.
bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't eliminate infinite loops.
  BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
  if (BB == Succ) return false;

  // Check to see if merging these blocks would cause conflicts for any of the
  // phi nodes in BB or Succ. If not, we can safely merge.
  if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged.  It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors.  Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(*BBI)) {
      for (Value::use_iterator UI = BBI->use_begin(), E = BBI->use_end();
           UI != E; ++UI) {
        if (PHINode* PN = dyn_cast<PHINode>(*UI)) {
          if (PN->getIncomingBlock(UI) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

  DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);

  if (isa<PHINode>(Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes
    //
    const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));

    // Loop over all of the PHI nodes in the successor of BB.
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);

      redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
    }
  }

  if (Succ->getSinglePredecessor()) {
    // BB is the only predecessor of Succ, so Succ will end up with exactly
    // the same predecessors BB had.

    // Copy over any phi, debug or lifetime instruction.
    BB->getTerminator()->eraseFromParent();
    Succ->getInstList().splice(Succ->getFirstNonPHI(), BB->getInstList());
  } else {
    while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
      // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

  // Everything that jumped to BB now goes to Succ.
  BB->replaceAllUsesWith(Succ);
  if (!Succ->hasName()) Succ->takeName(BB);
  BB->eraseFromParent();              // Delete the old basic block.
  return true;
}

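// Illustrative sketch, not part of the original file: how a caller might
// recognize a forwarding block that the routine above can fold away.  The
// helper name is hypothetical.
static bool tryFoldForwardingBlock(BasicBlock *BB) {
  // Never touch the entry block; the routine above asserts on it.
  if (BB == &BB->getParent()->getEntryBlock())
    return false;
  // The block must end in an unconditional branch and contain nothing else
  // besides PHI nodes and debug/lifetime intrinsics.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return false;
  if (BB->getFirstNonPHIOrDbgOrLifetime() != BI)
    return false;
  return TryToSimplifyUncondBranchFromEmptyBlock(BB);
}
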
/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
/// nodes in this block. This doesn't try to be clever about PHI nodes
/// which differ only in the order of the incoming values, but instcombine
/// orders them so it usually won't matter.
///
bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
  bool Changed = false;

  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  // Map from PHI hash values to PHI nodes. If multiple PHIs have
  // the same hash value, the element is the first PHI in the
  // linked list in CollisionMap.
  DenseMap<uintptr_t, PHINode *> HashMap;

  // Maintain linked lists of PHI nodes with common hash values.
  DenseMap<PHINode *, PHINode *> CollisionMap;

  // Examine each PHI.
  for (BasicBlock::iterator I = BB->begin();
       PHINode *PN = dyn_cast<PHINode>(I++); ) {
    // Compute a hash value on the operands. Instcombine will likely have sorted
    // them, which helps expose duplicates, but we have to check all the
    // operands to be safe in case instcombine hasn't run.
    uintptr_t Hash = 0;
    // This hash algorithm is quite weak as hash functions go, but it seems
    // to do a good enough job for this particular purpose, and is very quick.
    for (User::op_iterator I = PN->op_begin(), E = PN->op_end(); I != E; ++I) {
      Hash ^= reinterpret_cast<uintptr_t>(static_cast<Value *>(*I));
      Hash = (Hash << 7) | (Hash >> (sizeof(uintptr_t) * CHAR_BIT - 7));
    }
    for (PHINode::block_iterator I = PN->block_begin(), E = PN->block_end();
         I != E; ++I) {
      Hash ^= reinterpret_cast<uintptr_t>(static_cast<BasicBlock *>(*I));
      Hash = (Hash << 7) | (Hash >> (sizeof(uintptr_t) * CHAR_BIT - 7));
    }
    // Avoid colliding with the DenseMap sentinels ~0 and ~0-1.
    Hash >>= 1;
    // If we've never seen this hash value before, it's a unique PHI.
    std::pair<DenseMap<uintptr_t, PHINode *>::iterator, bool> Pair =
      HashMap.insert(std::make_pair(Hash, PN));
    if (Pair.second) continue;
    // Otherwise it's either a duplicate or a hash collision.
    for (PHINode *OtherPN = Pair.first->second; ; ) {
      if (OtherPN->isIdenticalTo(PN)) {
        // A duplicate. Replace this PHI with its duplicate.
        PN->replaceAllUsesWith(OtherPN);
        PN->eraseFromParent();
        Changed = true;
        break;
      }
      // A non-duplicate hash collision.
      DenseMap<PHINode *, PHINode *>::iterator I = CollisionMap.find(OtherPN);
      if (I == CollisionMap.end()) {
        // Set this PHI to be the head of the linked list of colliding PHIs.
        PHINode *Old = Pair.first->second;
        Pair.first->second = PN;
        CollisionMap[PN] = Old;
        break;
      }
      // Proceed to the next PHI in the list.
      OtherPN = I->second;
    }
  }

  return Changed;
}

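// Illustrative sketch, not part of the original file: running the PHI
// de-duplication above over every block of a function.  The helper name is
// hypothetical.
static bool eliminateDuplicatePHIsInFunction(Function &F) {
  bool Changed = false;
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    Changed |= EliminateDuplicatePHINodes(&*BB);
  return Changed;
}
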
/// enforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned enforceKnownAlignment(Value *V, unsigned Align,
                                      unsigned PrefAlign, const DataLayout *TD) {
  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up. This avoids dynamic stack realignment.
    if (TD && TD->exceedsNaturalStackAlignment(PrefAlign))
      return Align;
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      return AI->getAlignment();
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the alignment
    // of the global.
    if (GV->isDeclaration()) return Align;
    // If the memory we set aside for the global may not be the memory used by
    // the final program then it is impossible for us to reliably enforce the
    // preferred alignment.
    if (GV->isWeakForLinker()) return Align;

    if (GV->getAlignment() >= PrefAlign)
      return GV->getAlignment();
    // We can only increase the alignment of the global if it has no alignment
    // specified or if it is not assigned a section.  If it is assigned a
    // section, the global could be densely packed with other objects in the
    // section; increasing the alignment could cause padding issues.
    if (!GV->hasSection() || GV->getAlignment() == 0)
      GV->setAlignment(PrefAlign);
    return GV->getAlignment();
  }

  return Align;
}

/// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0.  If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
                                          const DataLayout *TD) {
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, KnownZero, KnownOne, TD);
  unsigned TrailZ = KnownZero.countTrailingOnes();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, +Value::MaximumAlignment);

  if (PrefAlign > Align)
    Align = enforceKnownAlignment(V, Align, PrefAlign, TD);

  // We don't need to make any adjustment.
  return Align;
}

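// Illustrative sketch, not part of the original file: a transform that wants
// to emit a wide (say 16-byte) access through Ptr can ask for that alignment
// up front; the call both computes the provable alignment and, where the
// underlying alloca or global permits, raises it.  The 16-byte figure and the
// helper name are hypothetical.
static bool hasSixteenByteAlignment(Value *Ptr, const DataLayout *TD) {
  return getOrEnforceKnownAlignment(Ptr, 16, TD) >= 16;
}
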
///===---------------------------------------------------------------------===//
///  Dbg Intrinsic utilities
///

/// See if there is a dbg.value intrinsic for DIVar before I.
static bool LdStHasDebugValue(DIVariable &DIVar, Instruction *I) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  llvm::BasicBlock::InstListType::iterator PrevI(I);
  if (PrevI != I->getParent()->getInstList().begin()) {
    --PrevI;
    if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI))
      if (DVI->getValue() == I->getOperand(0) &&
          DVI->getOffset() == 0 &&
          DVI->getVariable() == DIVar)
        return true;
  }
  return false;
}

/// Inserts an llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                           StoreInst *SI, DIBuilder &Builder) {
  DIVariable DIVar(DDI->getVariable());
  assert((!DIVar || DIVar.isVariable()) &&
         "Variable in DbgDeclareInst should be either null or a DIVariable.");
  if (!DIVar)
    return false;

  if (LdStHasDebugValue(DIVar, SI))
    return true;

  Instruction *DbgVal = NULL;
  // If an argument is zero extended then use argument directly. The ZExt
  // may be zapped by an optimization pass in the future.
  Argument *ExtendedArg = NULL;
  if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
    ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
  if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
    ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
  if (ExtendedArg)
    DbgVal = Builder.insertDbgValueIntrinsic(ExtendedArg, 0, DIVar, SI);
  else
    DbgVal = Builder.insertDbgValueIntrinsic(SI->getOperand(0), 0, DIVar, SI);

  // Propagate any debug metadata from the store onto the dbg.value.
  DebugLoc SIDL = SI->getDebugLoc();
  if (!SIDL.isUnknown())
    DbgVal->setDebugLoc(SIDL);
  // Otherwise propagate debug metadata from dbg.declare.
  else
    DbgVal->setDebugLoc(DDI->getDebugLoc());
  return true;
}

/// Inserts an llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                           LoadInst *LI, DIBuilder &Builder) {
  DIVariable DIVar(DDI->getVariable());
  assert((!DIVar || DIVar.isVariable()) &&
         "Variable in DbgDeclareInst should be either null or a DIVariable.");
  if (!DIVar)
    return false;

  if (LdStHasDebugValue(DIVar, LI))
    return true;

  Instruction *DbgVal =
    Builder.insertDbgValueIntrinsic(LI->getOperand(0), 0,
                                    DIVar, LI);

  // Propagate any debug metadata from the load onto the dbg.value.
  DebugLoc LIDL = LI->getDebugLoc();
  if (!LIDL.isUnknown())
    DbgVal->setDebugLoc(LIDL);
  // Otherwise propagate debug metadata from dbg.declare.
  else
    DbgVal->setDebugLoc(DDI->getDebugLoc());
  return true;
}

/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into an appropriate
/// set of llvm.dbg.value intrinsics.
bool llvm::LowerDbgDeclare(Function &F) {
  DIBuilder DIB(*F.getParent());
  SmallVector<DbgDeclareInst *, 4> Dbgs;
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI)
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE; ++BI) {
      if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI))
        Dbgs.push_back(DDI);
    }
  if (Dbgs.empty())
    return false;

  for (SmallVectorImpl<DbgDeclareInst *>::iterator I = Dbgs.begin(),
         E = Dbgs.end(); I != E; ++I) {
    DbgDeclareInst *DDI = *I;
    if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress())) {
      // We only remove the dbg.declare intrinsic if all uses are
      // converted to dbg.value intrinsics.
      bool RemoveDDI = true;
      for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
           UI != E; ++UI)
        if (StoreInst *SI = dyn_cast<StoreInst>(*UI))
          ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
        else if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
          ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
        else
          RemoveDDI = false;
      if (RemoveDDI)
        DDI->eraseFromParent();
    }
  }
  return true;
}

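// Illustrative sketch, not part of the original file: a transform that inserts
// an extra store to an alloca can keep variable locations accurate by pairing
// the new store with a matching llvm.dbg.value, reusing the helpers above
// (FindAllocaDbgDeclare is defined just below).  The wrapper name is
// hypothetical.
static void addDbgValueForNewStore(AllocaInst *AI, StoreInst *NewSI,
                                   DIBuilder &DIB) {
  if (DbgDeclareInst *DDI = FindAllocaDbgDeclare(AI))
    ConvertDebugDeclareToDebugValue(DDI, NewSI, DIB);
}
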
/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic describing the
/// alloca 'V', if any.
DbgDeclareInst *llvm::FindAllocaDbgDeclare(Value *V) {
  if (MDNode *DebugNode = MDNode::getIfExists(V->getContext(), V))
    for (Value::use_iterator UI = DebugNode->use_begin(),
           E = DebugNode->use_end(); UI != E; ++UI)
      if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
        return DDI;

  return 0;
}

bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                                      DIBuilder &Builder) {
  DbgDeclareInst *DDI = FindAllocaDbgDeclare(AI);
  if (!DDI)
    return false;
  DIVariable DIVar(DDI->getVariable());
  assert((!DIVar || DIVar.isVariable()) &&
         "Variable in DbgDeclareInst should be either null or a DIVariable.");
  if (!DIVar)
    return false;

  // Create a copy of the original DIDescriptor for the user variable,
  // appending a "deref" operation to the list of address elements, as the new
  // llvm.dbg.declare will take a value storing the address of the variable's
  // memory, not the alloca itself.
  Type *Int64Ty = Type::getInt64Ty(AI->getContext());
  SmallVector<Value*, 4> NewDIVarAddress;
  if (DIVar.hasComplexAddress()) {
    for (unsigned i = 0, n = DIVar.getNumAddrElements(); i < n; ++i) {
      NewDIVarAddress.push_back(
          ConstantInt::get(Int64Ty, DIVar.getAddrElement(i)));
    }
  }
  NewDIVarAddress.push_back(ConstantInt::get(Int64Ty, DIBuilder::OpDeref));
  DIVariable NewDIVar = Builder.createComplexVariable(
      DIVar.getTag(), DIVar.getContext(), DIVar.getName(),
      DIVar.getFile(), DIVar.getLineNumber(), DIVar.getType(),
      NewDIVarAddress, DIVar.getArgNumber());

  // Insert llvm.dbg.declare in the same basic block as the original alloca,
  // and remove old llvm.dbg.declare.
  BasicBlock *BB = AI->getParent();
  Builder.insertDeclare(NewAllocaAddress, NewDIVar, BB);
  DDI->eraseFromParent();
  return true;
}

bool llvm::removeUnreachableBlocks(Function &F) {
  SmallPtrSet<BasicBlock*, 16> Reachable;
  SmallVector<BasicBlock*, 128> Worklist;
  Worklist.push_back(&F.getEntryBlock());
  Reachable.insert(&F.getEntryBlock());
  do {
    BasicBlock *BB = Worklist.pop_back_val();
    for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
      if (Reachable.insert(*SI))
        Worklist.push_back(*SI);
  } while (!Worklist.empty());

  if (Reachable.size() == F.size())
    return false;

  assert(Reachable.size() < F.size());
  for (Function::iterator I = llvm::next(F.begin()), E = F.end(); I != E; ++I) {
    if (Reachable.count(I))
      continue;

    for (succ_iterator SI = succ_begin(I), SE = succ_end(I); SI != SE; ++SI)
      if (Reachable.count(*SI))
        (*SI)->removePredecessor(I);
    I->dropAllReferences();
  }

  for (Function::iterator I = llvm::next(F.begin()), E = F.end(); I != E;)
    if (!Reachable.count(I))
      I = F.getBasicBlockList().erase(I);
    else
      ++I;

  return true;
}
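
// Illustrative sketch, not part of the original file: a cleanup that folds
// every constant terminator in a function and then drops the blocks that the
// folding disconnected from the entry block.  The helper name is hypothetical.
static bool foldTerminatorsAndPruneCFG(Function &F,
                                       const TargetLibraryInfo *TLI) {
  bool Changed = false;
  // ConstantFoldTerminator never deletes blocks, so this iteration is safe.
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    Changed |= ConstantFoldTerminator(&*BB, /*DeleteDeadConditions=*/true, TLI);
  Changed |= removeUnreachableBlocks(F);
  return Changed;
}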