//===- DeadStoreElimination.cpp - Fast Dead Store Elimination ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther , "Number of other instrs removed");

namespace {
  struct DSE : public FunctionPass {
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const TargetLibraryInfo *TLI;

    static char ID; // Pass identification, replacement for typeid
    DSE() : FunctionPass(ID), AA(nullptr), MD(nullptr), DT(nullptr) {
      initializeDSEPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override {
      if (skipOptnoneFunction(F))
        return false;

      AA = &getAnalysis<AliasAnalysis>();
      MD = &getAnalysis<MemoryDependenceAnalysis>();
      DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
      TLI = AA->getTargetLibraryInfo();

      bool Changed = false;
      for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
        // Only check non-dead blocks.  Dead blocks may have strange pointer
        // cycles that will confuse alias analysis.
        if (DT->isReachableFromEntry(I))
          Changed |= runOnBasicBlock(*I);

      AA = nullptr; MD = nullptr; DT = nullptr;
      return Changed;
    }

    bool runOnBasicBlock(BasicBlock &BB);
    bool HandleFree(CallInst *F);
    bool handleEndBlock(BasicBlock &BB);
    void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                               SmallSetVector<Value*, 16> &DeadStackObjects);

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }
  };
}

char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)

FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
///
/// If ValueSet is non-null, remove any deleted instructions from it as well.
///
static void DeleteDeadInstruction(Instruction *I,
                                  MemoryDependenceAnalysis &MD,
                                  const TargetLibraryInfo *TLI,
                                  SmallSetVector<Value*, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, TLI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet) ValueSet->remove(DeadInst);
  } while (!NowDeadInsts.empty());
}


/// hasMemoryWrite - Does this instruction write some memory?  This only returns
/// true for things that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (CallSite CS = I) {
    if (Function *F = CS.getCalledFunction()) {
      if (TLI && TLI->has(LibFunc::strcpy) &&
          F->getName() == TLI->getName(LibFunc::strcpy)) {
        return true;
      }
      if (TLI && TLI->has(LibFunc::strncpy) &&
          F->getName() == TLI->getName(LibFunc::strncpy)) {
        return true;
      }
      if (TLI && TLI->has(LibFunc::strcat) &&
          F->getName() == TLI->getName(LibFunc::strcat)) {
        return true;
      }
      if (TLI && TLI->has(LibFunc::strncat) &&
          F->getName() == TLI->getName(LibFunc::strncat)) {
        return true;
      }
    }
  }
  return false;
}

/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
static AliasAnalysis::Location
getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  const DataLayout *DL = AA.getDataLayout();
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return AA.getLocation(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // memset/memcpy, which writes more than an i8.
    if (Loc.Size == AliasAnalysis::UnknownSize && DL == nullptr)
      return AliasAnalysis::Location();
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II) return AliasAnalysis::Location();

  switch (II->getIntrinsicID()) {
  default: return AliasAnalysis::Location(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // init.trampoline, which writes more than an i8.
    if (!DL) return AliasAnalysis::Location();

    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return AliasAnalysis::Location(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return AliasAnalysis::Location(II->getArgOperand(1), Len);
  }
  }
}

/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
static AliasAnalysis::Location
getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
  assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) &&
         "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return AA.getLocationForSource(MTI);
  return AliasAnalysis::Location();
}


/// isRemovable - If the value of this instruction and the memory it writes to
/// is unused, may we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;

    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    }
  }

  if (CallSite CS = I)
    return CS.getInstruction()->use_empty();

  return false;
}


/// isShortenable - Returns true if this instruction can be safely shortened in
/// length.
static bool isShortenable(Instruction *I) {
  // Don't shorten stores for now.
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
      // Do shorten memory intrinsics.
      return true;
    }
  }

  // Don't shorten libcall writes for now.

  return false;
}

/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::init_trampoline:
      return II->getArgOperand(0);
    }
  }

  CallSite CS = I;
  // All the supported functions so far happen to have dest as their first
  // argument.
  return CS.getArgument(0);
}

static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
  uint64_t Size;
  if (getObjectSize(V, Size, AA.getDataLayout(), AA.getTargetLibraryInfo()))
    return Size;
  return AliasAnalysis::UnknownSize;
}

namespace {
  enum OverwriteResult
  {
    OverwriteComplete,
    OverwriteEnd,
    OverwriteUnknown
  };
}

/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location,
/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined.
static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
                                   const AliasAnalysis::Location &Earlier,
                                   AliasAnalysis &AA,
                                   int64_t &EarlierOff,
                                   int64_t &LaterOff) {
  const DataLayout *DL = AA.getDataLayout();
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == AliasAnalysis::UnknownSize ||
        Earlier.Size == AliasAnalysis::UnknownSize) {
      // If we have no DataLayout information around, then the size of the store
      // is inferrable from the pointee type.  If they are the same type, then
      // we know that the store is safe.
      if (DL == nullptr && Later.Ptr->getType() == Earlier.Ptr->getType())
        return OverwriteComplete;

      return OverwriteUnknown;
    }

    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OverwriteComplete;
  }

  // Otherwise, we have to have size information, and the later store has to be
  // larger than the earlier one.
  if (Later.Size == AliasAnalysis::UnknownSize ||
      Earlier.Size == AliasAnalysis::UnknownSize || DL == nullptr)
    return OverwriteUnknown;

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OverwriteUnknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, AA);
  if (ObjectSize != AliasAnalysis::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OverwriteComplete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return OverwriteUnknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OverwriteComplete;

  // The other interesting case is if the later store overwrites the end of
  // the earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (LaterOff > EarlierOff &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size))
    return OverwriteEnd;

  // Otherwise, they don't completely overlap.
  return OverwriteUnknown;
}

/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const AliasAnalysis::Location &InstStoreLoc,
                               Instruction *DepWrite, AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
  if (!InstReadLoc.Ptr) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= #
  // bytes as the second one).
  AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);

  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return false;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}


//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//

bool DSE::runOnBasicBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    Instruction *Inst = BBI++;

    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(Inst, TLI)) {
      MadeChange |= HandleFree(F);
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    if (!hasMemoryWrite(Inst, TLI))
      continue;

    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            SI->getOperand(0) == DepLoad && isRemovable(SI)) {
          DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
                       << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');

          // DeleteDeadInstruction can delete the current instruction.  Save BBI
          // in case we need it.
          WeakVH NextInst(BBI);

          DeleteDeadInstruction(SI, *MD, TLI);

          if (!NextInst)  // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin())  // Revisit this instruction if possible.
            --BBI;
          ++NumFastStores;
          MadeChange = true;
          continue;
        }
      }
    }

    // Figure out what location is being stored to.
    AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, bail out.
      if (!DepLoc.Ptr)
        break;

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know
      // 'Inst' doesn't load from, then we can remove it.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
                                         DepWriteOffset, InstWriteOffset);
        if (OR == OverwriteComplete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                       << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          DeleteDeadInstruction(DepWrite, *MD, TLI);
          ++NumFastStores;
          MadeChange = true;

          // DeleteDeadInstruction can delete the current instruction in loop
          // cases, reset BBI.
          BBI = Inst;
          if (BBI != BB.begin())
            --BBI;
          break;
        } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
          // TODO: base this on the target vector size so that if the earlier
          // store was too small to get vector writes anyway then it's likely
          // a good idea to shorten it.
          // Power-of-2 vector writes are probably always a bad idea to optimize
          // as any store/memset/memcpy is likely using vector instructions, so
          // shortening it to a non-vector size is likely to be slower.
          MemIntrinsic *DepIntrinsic = cast<MemIntrinsic>(DepWrite);
          unsigned DepWriteAlign = DepIntrinsic->getAlignment();
          if (llvm::isPowerOf2_64(InstWriteOffset) ||
              ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {

            DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW END: "
                         << *DepWrite << "\n  KILLER (offset "
                         << InstWriteOffset << ", "
                         << DepLoc.Size << ")"
                         << *Inst << '\n');

            Value *DepWriteLength = DepIntrinsic->getLength();
            Value *TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
                                                    InstWriteOffset -
                                                    DepWriteOffset);
            DepIntrinsic->setLength(TrimmedLength);
            MadeChange = true;
          }
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
bool DSE::HandleFree(CallInst *F) {
  bool MadeChange = false;

  AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasMemoryWrite(Dependency, TLI) || !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency));

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      Instruction *Next = std::next(BasicBlock::iterator(Dependency));

      // DCE instructions only used to calculate that store.
      DeleteDeadInstruction(Dependency, *MD, TLI);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted.  Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
    }

    if (Dep.isNonLocal())
      FindUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
///   %A = alloca i32
///   ...
///   store i32 1, i32* %A
///   ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
    if (isa<AllocaInst>(I))
      DeadStackObjects.insert(I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyway.
    else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
      DeadStackObjects.insert(I);
  }

  // Treat byval or inalloca arguments the same, stores to them are dead at the
  // end of the function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValOrInAllocaAttr())
      DeadStackObjects.insert(AI);

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ) {
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
           E = Pointers.end(); I != E; ++I)
        if (!DeadStackObjects.count(*I)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI, TLI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(BBI);
      continue;
    }

    if (CallSite CS = cast<Value>(BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(BBI, TLI))
        DeadStackObjects.remove(BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        AliasAnalysis::ModRefResult A =
            AA->getModRefInfo(CS, I, getPointerSize(I, *AA));

        return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load.
        break;
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set.  If so, they become live
/// because the location is being loaded.
void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                                SmallSetVector<Value*, 16> &DeadStackObjects) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}