MemCpyOptimizer.cpp revision 49c7e3e290e4633971cbeac996d8cffbe2aedc1d
//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with. This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0, etc. If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2 = Val.lshr(NextWidth);
        Val2 = Val2.trunc(Val.getBitWidth()/2);
        Val  = Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(V->getContext(), Val);
    }
  }

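  // For example (illustrative): for i32 0x01010101 the halving loop above
  // compares 0x0101 with 0x0101, then 0x01 with 0x01, and returns i8 1; for
  // i16 0x1234 the first comparison (0x12 vs. 0x34) fails and we return null.
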
  // A ConstantArray is splatable if all its members are equal and also
  // splatable.
  if (ConstantArray *CA = dyn_cast<ConstantArray>(V)) {
    if (CA->getNumOperands() == 0)
      return 0;

    Value *Val = isBytewiseValue(CA->getOperand(0));
    if (!Val)
      return 0;

    for (unsigned I = 1, E = CA->getNumOperands(); I != E; ++I)
      if (CA->getOperand(I-1) != CA->getOperand(I))
        return 0;

    return Val;
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}

static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, TargetData &TD) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector. Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset. For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40]. In this case the offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            TargetData &TD) {
  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base. After that base, they may have some number of common (and
  // potentially variable) indices. After those, they may differ by a constant
  // offset, which determines their distance from each other. We handle no
  // other case at this point.
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  bool VariableIdxFound = false;
  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}

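// For example (illustrative, assuming both GEPs share the i32 array %A as
// their base):
//   %p1 = getelementptr [100 x i32]* %A, i64 0, i64 42   ; Ptr1 = &A[42]
//   %p2 = getelementptr [100 x i32]* %A, i64 0, i64 40   ; Ptr2 = &A[40]
// The common base and leading zero index are skipped, GetOffsetFromIndex
// yields Offset1 = 168 and Offset2 = 160, and IsPointerOffset sets
// Offset = -8.
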
/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
/// two ranges into [0, 3), which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A half-open range describing the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<StoreInst*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found 8 or more stores to merge, or 64 or more bytes, use memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() <= 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost
  // always. However, merging 2 32-bit stores isn't useful on a 32-bit
  // architecture (the memset will be split into 2 32-bit stores anyway) and
  // doing so can pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored. If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

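// Worked example (illustrative, assuming a 32-bit target where
// TD.getPointerSize() == 4): six i8 stores covering 6 bytes give
// NumPointerStores = 1 and NumByteStores = 2, and since 6 > 3 the range is
// profitable; three i32 stores covering 12 bytes give NumPointerStores = 3
// and NumByteStores = 0, and since 3 > 3 is false the range is rejected.
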
namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges. We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  TargetData &TD;
public:
  MemsetRanges(TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI);
};

} // end anon namespace


/// addStore - Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());

  // Do a linear search of the ranges to see if this can be joined and/or to
  // find the insertion point in the list. We keep the ranges sorted for
  // simplicity here. This is a linear search of a linked list, which is ugly;
  // however, the number of ranges is limited, so this won't get crazy slow.
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start     = Start;
    R.End       = End;
    R.StartPtr  = SI->getPointerOperand();
    R.Alignment = SI->getAlignment();
    R.TheStores.push_back(SI);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(SI);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = SI->getPointerOperand();
    I->Alignment = SI->getAlignment();
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

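// Worked example (illustrative): with existing ranges [0, 2) and [4, 6),
// adding a store covering [1, 5) lands on the first range, extends its End
// to 5, and then the merge loop above absorbs [4, 6) (since 5 >= 4), leaving
// a single range [0, 6) that owns all three ranges' stores.
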
//===----------------------------------------------------------------------===//
// MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = 0;
    }

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                       uint64_t MSize);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// processStore - When scanning forward over instructions, we look for some
/// other patterns to fold away. In particular, this looks for stores to
/// neighboring locations of memory. If it sees enough consecutive ones
/// (currently 4) it attempts to merge them together into a memcpy/memset.
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  // Detect cases where we're performing call slot forwarding, but happen to
  // be using a load-store pair to implement it, rather than a memcpy.
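  // For instance (illustrative IR):
  //   %t = alloca %T
  //   call void @f(%T* sret %t)
  //   %v = load %T* %t
  //   store %T %v, %T* %dest
  // Here the load/store pair just forwards %t into %dest, so the call may be
  // able to write its result into %dest directly.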
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (!LI->isVolatile() && LI->hasOneUse()) {
      MemDepResult dep = MD->getDependency(LI);
      CallInst *C = 0;
      if (dep.isClobber() && !isa<MemCpyInst>(dep.getInst()))
        C = dyn_cast<CallInst>(dep.getInst());

      if (C) {
        bool changed = performCallSlotOptzn(LI,
                        SI->getPointerOperand()->stripPointerCasts(),
                        LI->getPointerOperand()->stripPointerCasts(),
                        TD->getTypeStoreSize(SI->getOperand(0)->getType()), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  LLVMContext &Context = SI->getContext();

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  Value *ByteVal = isBytewiseValue(SI->getOperand(0));
  if (!ByteVal)
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  Module *M = SI->getParent()->getParent()->getParent();

  // Okay, so we now have a single store with a splatable value. Scan to find
  // all subsequent stores of the same value at offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous
  // blocks are stored.
  MemsetRanges Ranges(*TD);

  Value *StartPtr = SI->getPointerOperand();

  BasicBlock::iterator BI = SI;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
      // If the call is readnone, ignore it; otherwise bail out. We don't even
      // allow readonly here because we don't want something like:
      //   A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (AA.getModRefBehavior(CallSite(BI)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;

      // TODO: If this is a memset, try to join it in.
      break;
    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
      break;

    // If this is a non-store instruction it is fine, ignore it.
    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
    if (NextStore == 0) continue;

    // If this is a store, see if we can merge it in.
    if (NextStore->isVolatile()) break;

    // Check to see if this stored value is of the same byte-splattable value.
    if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
      break;

    // Check to see if this store is to a constant offset from the start ptr.
    int64_t Offset;
    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
      break;

    Ranges.addStore(Offset, NextStore);
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return false;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addStore(0, SI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  bool MadeChange = false;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this! Create a new memset. We put
    // the memset right before the first instruction that isn't part of this
    // memset block. This ensures that the memset is dominated by any
    // addressing instruction needed by the start of the block.
    BasicBlock::iterator InsertPt = BI;

    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      const Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }

    // Cast the start ptr to be i8* as memset requires.
    const PointerType *StartPTy = cast<PointerType>(StartPtr->getType());
    const PointerType *i8Ptr = Type::getInt8PtrTy(Context,
                                                  StartPTy->getAddressSpace());
    if (StartPTy != i8Ptr)
      StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
                                 InsertPt);

    Value *Ops[] = {
      StartPtr, ByteVal,  // Start, value
      // size
      ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
      // align
      ConstantInt::get(Type::getInt32Ty(Context), Alignment),
      // volatile
      ConstantInt::getFalse(Context),
    };
    const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };

    Function *MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);

    Value *C = CallInst::Create(MemSetF, Ops, Ops+5, "", InsertPt);
    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *C << '\n'); (void)C;
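    // The call created above takes the form (illustrative; the exact
    // overloaded name depends on the pointer/length types in Tys):
    //   call void @llvm.memset.p0i8.i64(i8* %start, i8 %val, i64 %size,
    //                                   i32 %align, i1 false)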

    // Don't invalidate the iterator.
    BBI = BI;

    // Zap all the stores.
    for (SmallVector<StoreInst*, 16>::const_iterator
           SI = Range.TheStores.begin(),
           SE = Range.TheStores.end(); SI != SE; ++SI)
      (*SI)->eraseFromParent();
    ++NumMemSetInfer;
    MadeChange = true;
  }

  return MadeChange;
}


/// performCallSlotOptzn - Takes a memcpy and a call that it depends on, and
/// checks for the possibility of a call slot optimization by having the call
/// write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning
  // that the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
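  // For example (illustrative): if src is a 16-byte alloca but dest is only
  // an 8-byte alloca, redirecting the call to write 16 bytes into dest could
  // trap (or clobber neighboring memory) where the original code would not,
  // so the checks below require dest to be at least srcSize bytes.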
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check that it is at least as large as
    // srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the
  // final memcpy can be dropped), that it is not read or written between the
  // call and the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpyDest, srcSize) != AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == cpyDest->getType())
        CS.setArgument(i, cpyDest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'. Try to simplify M to
/// copy from MDep's input if we can. MSize is the size of M's copy.
///
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep. This handles cases
  // like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // The lengths of the memcpys must be the same, or the preceding one must
  // cover at least as many bytes as the following one.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getZExtValue() < MSize)
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers. For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination
  // "c", then we could still perform the xform by moving M up to the first
  // memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap. We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
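  // For example (illustrative): given
  //   memcpy(b <- a, 16)
  //   memcpy(c <- b, 16)
  // rewriting the second copy as "c <- a" is only a memcpy if c and a cannot
  // overlap; if AA cannot prove NoAlias we conservatively emit a memmove.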
  Intrinsic::ID ResultFn = Intrinsic::memcpy;
  if (AA.alias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)) !=
      AliasAnalysis::NoAlias)
    ResultFn = Intrinsic::memmove;

  // If all checks passed, then we can transform M.
  const Type *ArgTys[3] = {
    M->getRawDest()->getType(),
    MDep->getRawSource()->getType(),
    M->getLength()->getType()
  };
  Function *MemCpyFun =
    Intrinsic::getDeclaration(MDep->getParent()->getParent()->getParent(),
                              ResultFn, ArgTys, 3);

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());
  Value *Args[5] = {
    M->getRawDest(),
    MDep->getRawSource(),
    M->getLength(),
    ConstantInt::get(Type::getInt32Ty(MemCpyFun->getContext()), Align),
    M->getVolatileCst()
  };
  CallInst::Create(MemCpyFun, Args, Args+5, "", M);

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}


/// processMemCpy - Perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize statically-sized memcpy's that are non-volatile.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (CopySize == 0 || M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
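  // For example (illustrative): copying from
  //   @G = constant [16 x i8] zeroinitializer
  // via memcpy(p <- @G, 16) stores the single splat byte 0 everywhere, so it
  // can be rewritten as memset(p, 0, 16).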
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (!GV->mayBeOverridden() && GV->isConstant() && GV->hasInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        Value *Ops[] = {
          M->getRawDest(), ByteVal,               // Start, value
          CopySize,                               // Size
          M->getAlignmentCst(),                   // Alignment
          ConstantInt::getFalse(M->getContext()), // volatile
        };
        const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };
        Module *Mod = M->getParent()->getParent()->getParent();
        Function *MemSetF = Intrinsic::getDeclaration(Mod, Intrinsic::memset,
                                                      Tys, 2);
        CallInst::Create(MemSetF, Ops, Ops+5, "", M);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult DepInfo = MD->getDependency(M);
  if (!DepInfo.isClobber())
    return false;

  if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst()))
    return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());

  if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
    if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                             CopySize->getZExtValue(), C)) {
      M->eraseFromParent();
      return true;
    }
  }
  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // See if the pointers alias.
  if (AA.alias(AA.getLocationForDest(M), AA.getLocationForSource(M)) !=
      AliasAnalysis::NoAlias)
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  const Type *ArgTys[3] = { M->getRawDest()->getType(),
                            M->getRawSource()->getType(),
                            M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys, 3));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// processByValArgument - This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  const Type *ByValTy =
    cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can pass the source of the memcpy as the byval
  // argument instead of the result.
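  // For example (illustrative):
  //   memcpy(%tmp <- %src, 64)
  //   call void @f(%T* byval %tmp)
  // can become "call void @f(%T* byval %src)" when %tmp is only the copy's
  // result, the copy covers the whole byval, the alignment suffices, and
  // %src is not modified in between.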
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (MDep == 0 || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval. If it is greater than the memcpy's, then
  // we can't do the substitution. If the call doesn't specify the alignment,
  // then it is some target-specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0 || MDep->getAlignment() < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy
  // and the byval call.
  //   memcpy(a <- b)
  //   *b = 42;
  //   foo(*a)
  // It would be invalid to transform the call into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good! Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I)) {
        RepeatInstruction = processMemCpy(M);
      } else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) {
        RepeatInstruction = processMemMove(M);
      } else if (CallSite CS = (Value*)I) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.paramHasAttr(i+1, Attribute::ByVal))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for
// a function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = 0;
  return MadeChange;
}