MemCpyOptimizer.cpp revision b90584ae78a7acc4ac92e3ad52121a10c520b980
//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, const TargetData &TD){
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      // The return value is meaningless here; callers must check
      // VariableIdxFound before using the offset.
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset.  For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40]; with 32-bit elements, Offset would be
/// -8, i.e. Ptr2 = Ptr1 + Offset.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const TargetData &TD) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
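  // For example (illustrative), if Ptr1 is "getelementptr i32* %P, i64 1" and
  // Ptr2 is %P, the GEP contributes a constant offset of 4 bytes, so we
  // return Offset = -4 (Ptr2 = Ptr1 + Offset).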
  if (GEP1 && GEP2 == 0 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  if (GEP2 && GEP1 == 0 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  // Right now we only handle the case where Ptr1 and Ptr2 are both GEPs with
  // an identical base.  After that base, they may have some number of common
  // (and potentially variable) indices.  After those, each may have some
  // constant indices, which determine their offset from each other.  We handle
  // no other case at this point.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found 8 or more stores to merge, or the range covers 64 or more
  // bytes, use memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the merged instructions is already a memset, then it is always
  // good to extend it.
  for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
    if (!isa<StoreInst>(TheStores[i]))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
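  // (Illustrative data point: with 4-byte pointers, two i32 stores cover
  // 8 bytes, which the heuristic below counts as two pointer-sized stores;
  // no improvement over the two stores we already have.)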
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored.  If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32,
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  const TargetData &TD;
public:
  MemsetRanges(const TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anon namespace


/// addRange - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
///
/// Do a linear search of the ranges to see if this can be joined and/or to
/// find the insertion point in the list.  We keep the ranges sorted for
/// simplicity here.  This is a linear search of a linked list, which is ugly,
/// however the number of ranges is limited, so this won't get crazy slow.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
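  // (Illustrative example: with existing ranges [0,6) and [10,12), adding a
  // store covering [4,8) grows the first range to [0,8) and leaves [10,12)
  // untouched; a later store covering [8,10) would then fuse everything into
  // [0,12).)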
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start, so the store
  // overlaps the range but is not entirely contained within it.

  // See if this store extends the start of the range.  If so, it couldn't
  // possibly cause the range to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                            MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    TargetLibraryInfo *TLI;
    const TargetData *TD;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = 0;
      TLI = 0;
      TD = 0;
    }

    bool runOnFunction(Function &F);

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetLibraryInfo>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions.
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                       uint64_t MSize);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file.
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away.
/// In particular, this looks for stores to neighboring locations of memory.
/// If it sees enough consecutive ones, it attempts to merge them together into
/// a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  if (TD == 0) return 0;

  // Okay, so we now have a single store whose value is byte-splattable.  Scan
  // to find all subsequent stores of the same value to offsets from the same
  // pointer.  Join these together into ranges, so we can decide whether
  // contiguous blocks are stored.
  MemsetRanges Ranges(*TD);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (NextStore->isVolatile()) break;

      // Check to see if this stored value is the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
                           Offset, *TD))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this memset is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return 0;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  As a small compile-time optimization, we avoid doing this
  // until we know there is something interesting to merge with.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = 0;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine the alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      const Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (SmallVector<Instruction*, 16>::const_iterator
         SI = Range.TheStores.begin(),
         SE = Range.TheStores.end(); SI != SE; ++SI) {
      MD->removeInstruction(*SI);
      (*SI)->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}


bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  if (TD == 0) return false;

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (!LI->isVolatile() && LI->hasOneUse()) {
      MemDepResult dep = MD->getDependency(LI);
      CallInst *C = 0;
      if (dep.isClobber() && !isa<MemCpyInst>(dep.getInst()))
        C = dyn_cast<CallInst>(dep.getInst());

      if (C) {
        bool changed = performCallSlotOptzn(LI,
                        SI->getPointerOperand()->stripPointerCasts(),
                        LI->getPointerOperand()->stripPointerCasts(),
                        TD->getTypeStoreSize(SI->getOperand(0)->getType()), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, like "0" or "-1" of any width, as well as values like
  // 0xA0A0A0A0 and 0.0.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I;  // Don't invalidate the iterator.
      return true;
    }

  return false;
}

bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I;  // Don't invalidate the iterator.
      return true;
    }
  return false;
}


/// performCallSlotOptzn - Takes a memcpy and a call that it depends on, and
/// checks for the possibility of a call slot optimization by having the call
/// write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  if (TD == 0) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check that it is at least as large as
    // srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
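  // (For example, if cpyDest is computed by a GEP that is emitted after the
  // call, we cannot simply make the call write into it; rather than hoist the
  // computation, we conservatively give up.)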
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpyDest, srcSize) != AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == cpyDest->getType())
        CS.setArgument(i, cpyDest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Remove the memcpy from MemDep; our callers erase the instruction itself.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
/// copy from MDep's input if we can.  MSize is the size of M's copy.
///
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction.  Just
  // ignore the input and let someone else zap MDep.  This handles cases like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the lengths of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
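  // (So even a plain load of *b between the two memcpy's defeats the
  // transform, although a mere read could not actually invalidate it.)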
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}


/// processMemCpy - Perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize statically-sized memcpy's that are non-volatile.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (CopySize == 0 || M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, CopySize,
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
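  //
  // For example (illustrative), xform (a) rewrites:
  //   memcpy(%b <- %a)
  //   memcpy(%c <- %b)
  // into:
  //   memcpy(%b <- %a)   ; now potentially dead
  //   memcpy(%c <- %a)
  // so that DSE can delete the first copy if %b has no other uses.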
  MemDepResult DepInfo = MD->getDependency(M);
  if (!DepInfo.isClobber())
    return false;

  if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst()))
    return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());

  if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
    if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                             CopySize->getZExtValue(), C)) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      return true;
    }
  }

  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // The source and dest are known not to alias, so transform this into a
  // memcpy by rewriting the intrinsic's called function.
  Module *Mod = M->getParent()->getParent()->getParent();
  const Type *ArgTys[3] = { M->getRawDest()->getType(),
                            M->getRawSource()->getType(),
                            M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys, 3));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// processByValArgument - This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  if (TD == 0) return false;

  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  const Type *ByValTy =
    cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can use the source of the memcpy as the byval argument
  // instead of the result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (MDep == 0 || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the
  // alignment, then it is some target-specific value that we can't know.  If
  // the byval's alignment is greater than the memcpy's, we can't do the
  // substitution.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0 || MDep->getAlignment() < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy
  // and the byval call.
  //   memcpy(a <- b)
  //   *b = 42;
  //   foo(*a)
  // It would be invalid to transform the call into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
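  //
  // When all the checks pass, the overall effect is (illustrative):
  //   memcpy(%tmp <- %src)
  //   call void @f(i8* byval %tmp)
  // becomes:
  //   memcpy(%tmp <- %src)   ; often dead afterwards
  //   call void @f(i8* byval %src)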
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (CallSite CS = (Value*)I) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.paramHasAttr(i+1, Attribute::ByVal))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  TD = getAnalysisIfAvailable<TargetData>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here.  These are required by a freestanding implementation,
  // so if even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = 0;
  return MadeChange;
}