MemCpyOptimizer.cpp revision 2c39b15073db81d93bb629303915b7d7e5d088dc
//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IRBuilder.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");

static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound, const DataLayout &TD){
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the element size.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset.  For example, Ptr1 might
/// be &A[42] and Ptr2 might be &A[40]; in that case Offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &TD) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();
  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
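  // An illustrative example of this case: Ptr1 = "gep i8* %P, i64 4" and
  // Ptr2 = "%P" gives GetOffsetFromIndex(GEP1, 1, ...) == 4, so Offset is set
  // to -4 here, i.e. Ptr2 == Ptr1 + Offset.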
  if (GEP1 && GEP2 == 0 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  if (GEP2 && GEP1 == 0 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After those, each may have a constant
  // offset, which determines their offset from each other.  At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [3, 4).  The fourth store joins the
/// two ranges into [0, 4), which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A half-open range describing the span of bytes this range
  // covers.  The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &TD) const;

};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
  // If we found more than 4 stores to merge or 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
    if (!isa<StoreInst>(TheStores[i]))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored.  If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned AS = cast<StoreInst>(TheStores[0])->getPointerAddressSpace();
  unsigned NumPointerStores = Bytes/TD.getPointerSize(AS);

  // Assume the remaining bytes, if any, are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize(AS);

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  const DataLayout &TD;
public:
  MemsetRanges(const DataLayout &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);

};

} // end anon namespace


/// addRange - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
///
/// Do a linear search of the ranges to see if this can be joined and/or to
/// find the insertion point in the list.  We keep the ranges sorted for
/// simplicity here.  This is a linear search of a linked list, which is ugly;
/// however, the number of ranges is limited, so this won't get crazy slow.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    TargetLibraryInfo *TLI;
    const DataLayout *TD;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = 0;
      TLI = 0;
      TD = 0;
    }

    bool runOnFunction(Function &F);

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetLibraryInfo>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                       uint64_t MSize);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
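// Typical usage of this entry point, assuming the legacy PassManager API of
// this LLVM version:
//   PassManager PM;
//   PM.add(createMemCpyOptPass());
// Clients are expected to construct the pass only through this factory.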
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  if (TD == 0) return 0;

  // Okay, so we now have a single store whose value is byte-splattable.  Scan
  // to find all subsequent stores of the same value at offsets from the same
  // pointer.  Join these together into ranges, so we can decide whether
  // contiguous blocks are stored.
  MemsetRanges Ranges(*TD);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this store is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
                           Offset, *TD))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this memset is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return 0;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid doing this unless there is at least
  // something interesting to merge, as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
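  // Illustrative walk-through: for the P+0..P+3 byte-store pattern described
  // in the MemsetRange comment above, a single [0, 4) range reaches this loop
  // and is emitted as one 4-byte memset.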
  Instruction *AMemSet = 0;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (SmallVector<Instruction*, 16>::const_iterator
         SI = Range.TheStores.begin(),
         SE = Range.TheStores.end(); SI != SE; ++SI) {
      MD->removeInstruction(*SI);
      (*SI)->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}


bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  if (TD == 0) return false;

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = 0;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
        AliasAnalysis::Location StoreLoc = AA.getLocation(SI);
        for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
             E = C; I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) {
            C = 0;
            break;
          }
        }
      }

      if (C) {
        unsigned storeAlign = SI->getAlignment();
        if (!storeAlign)
          storeAlign = TD->getABITypeAlignment(SI->getOperand(0)->getType());
        unsigned loadAlign = LI->getAlignment();
        if (!loadAlign)
          loadAlign = TD->getABITypeAlignment(LI->getType());

        bool changed = performCallSlotOptzn(LI,
                        SI->getPointerOperand()->stripPointerCasts(),
                        LI->getPointerOperand()->stripPointerCasts(),
                        TD->getTypeStoreSize(SI->getOperand(0)->getType()),
                        std::min(storeAlign, loadAlign), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, such as "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
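  // For example, isBytewiseValue returns an i8 0xA0 splat value for a stored
  // i32 0xA0A0A0A0, but returns null for a value like 0x12345678, in which
  // case we skip the merging attempt below.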
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }

  return false;
}

bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }
  return false;
}


/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, unsigned cpyAlign,
                                     CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  if (TD == 0) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = TD->getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  AliasAnalysis::ModRefResult MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != AliasAnalysis::NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                          CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Remove the memcpy.
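  // Note that only the MemDep entry for 'cpy' is dropped here; the caller
  // (e.g. processMemCpy or processStore) is responsible for erasing the
  // instruction itself once this returns true.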
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
/// copy from MDep's input if we can.  MSize is the size of M's copy.
///
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep.  This handles cases
  // like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the lengths of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // it would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}


/// processMemCpy - perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize statically-sized memcpys that are non-volatile.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (CopySize == 0 || M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, CopySize,
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  // There are two possible optimizations we can do for memcpy:
  //   a) the memcpy-memcpy xform, which exposes redundancy for DSE.
  //   b) the call-memcpy xform for return slot optimization.
  MemDepResult DepInfo = MD->getDependency(M);
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  AliasAnalysis::Location SrcLoc = AliasAnalysis::getLocationForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
                                                         M, M->getParent());
  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());
  }

  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// processByValArgument - This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  if (TD == 0) return false;

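  // Illustrative shape of the rewrite performed below (sketch):
  //    memcpy(%tmp <- %src, n)
  //    call void @foo(%struct.S* byval %tmp)
  // becomes, once all the checks pass,
  //    call void @foo(%struct.S* byval %src)
  // after which the memcpy is often dead and removable by later passes.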
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (MDep == 0 || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be greater than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the alignment,
  // then it is some target-specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0) return false;

  // If the byval alignment is greater than the memcpy's, then we check to see
  // if we can force the source of the memcpy to the alignment we need.  If we
  // fail, we bail out.
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, TD) < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the call into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (CallSite CS = (Value*)I) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  TD = getAnalysisIfAvailable<DataLayout>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here.  These are required by a freestanding implementation,
  // so if even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = 0;
  return MadeChange;
}