MemCpyOptimizer.cpp revision 7a0b4fdd143a8333453dbfa1a85af641c98b5ca4
//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");

/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with.  This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable".  An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2 = Val.lshr(NextWidth);
        Val2.trunc(Val.getBitWidth()/2);
        Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(V->getContext(), Val);
    }
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}

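// A worked instance of the decomposition above (illustrative only, not part
// of the original source): for i32 0xA0A0A0A0 the halves are 0xA0A0 == 0xA0A0,
// then 0xA0 == 0xA0, so the function returns i8 0xA0.  For i16 0x1234 the
// halves 0x12 and 0x34 differ, so it returns null.
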
static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, TargetData &TD) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset in Offset.  For example,
/// Ptr1 might be &A[42] and Ptr2 might be &A[40]; with 4-byte elements, the
/// offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            TargetData &TD) {
  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base.  After that base, they may have some number of common
  // (and potentially variable) indices.  After those, each may have a
  // constant offset, and those constant offsets determine the pointers'
  // offset from each other.  We handle no other case at this point.
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  bool VariableIdxFound = false;
  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}

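// An illustrative case for IsPointerOffset above (example only; names are
// hypothetical):
//   %p1 = getelementptr [100 x i32]* %A, i32 0, i32 42
//   %p2 = getelementptr [100 x i32]* %A, i32 0, i32 40
// The GEPs share the base %A and the trailing indices are constants, so
// IsPointerOffset(%p1, %p2, Offset, TD) succeeds with Offset = -8.
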
/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the
/// range to [0, 2).  The third makes a new range [2, 3).  The fourth store
/// joins the two ranges into [0, 3), which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A half-open range describing the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<StoreInst*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;

};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found 8 or more stores to merge, or 64 or more bytes, use memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() <= 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost
  // always.  However, merging 2 32-bit stores isn't useful on a 32-bit
  // architecture (the memset will be split into 2 32-bit stores anyway) and
  // doing so can pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored.  If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  TargetData &TD;
public:
  MemsetRanges(TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI);
};

} // end anon namespace

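// A worked instance of the heuristic above (illustrative, assuming a 64-bit
// target where TD.getPointerSize() == 8): five i8 stores covering 5
// contiguous bytes give Bytes = 5, NumPointerStores = 0, NumByteStores = 5,
// and 5 > 0+5 is false, so the stores are left alone.  Five i32 stores
// covering 20 bytes give NumPointerStores = 2, NumByteStores = 4, and
// 5 > 2+4 is also false; but nine i32 stores covering 36 bytes trip the
// TheStores.size() >= 8 fast path and get merged into a memset.
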
/// addStore - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());

  // Do a linear search of the ranges to see if this can be joined and/or to
  // find the insertion point in the list.  We keep the ranges sorted for
  // simplicity here.  This is a linear search of a linked list, which is ugly,
  // however the number of ranges is limited, so this won't get crazy slow.
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start     = Start;
    R.End       = End;
    R.StartPtr  = SI->getPointerOperand();
    R.Alignment = SI->getAlignment();
    R.TheStores.push_back(SI);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(SI);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if the store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = SI->getPointerOperand();
    I->Alignment = SI->getAlignment();
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out
  // to End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = 0;
    }

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDest, Value *cpySrc,
                              uint64_t cpyLen, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                       uint64_t MSize);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

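// Illustrative IR for the store merging performed by processStore below
// (example only; assumes a 64-bit target and hypothetical value names):
//   store i32 0, i32* %p0, align 4      ; %p0 = &A[0]
//   store i32 0, i32* %p1, align 4      ; %p1 = &A[1]
//   store i32 0, i32* %p2, align 4      ; %p2 = &A[2]
//   store i32 0, i32* %p3, align 4      ; %p3 = &A[3]
// becomes a single call (with %A cast to i8*):
//   call void @llvm.memset.p0i8.i64(i8* %A, i8 0, i64 16, i32 4, i1 false)
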
/// processStore - When GVN is scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones
/// (currently 4) it attempts to merge them together into a memcpy/memset.
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (!LI->isVolatile() && LI->hasOneUse()) {
      MemDepResult dep = MD->getDependency(LI);
      CallInst *C = 0;
      if (dep.isClobber() && !isa<MemCpyInst>(dep.getInst()))
        C = dyn_cast<CallInst>(dep.getInst());

      if (C) {
        bool changed = performCallSlotOptzn(LI,
                        SI->getPointerOperand()->stripPointerCasts(),
                        LI->getPointerOperand()->stripPointerCasts(),
                        TD->getTypeStoreSize(SI->getOperand(0)->getType()), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  LLVMContext &Context = SI->getContext();

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time: "0" or "-1" of any width, as well as things like 0xA0A0A0A0
  // and 0.0.
  Value *ByteVal = isBytewiseValue(SI->getOperand(0));
  if (!ByteVal)
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  Module *M = SI->getParent()->getParent()->getParent();

  // Okay, so we now have a single store of a splatable value.  Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous
  // blocks are stored.
  MemsetRanges Ranges(*TD);

  Value *StartPtr = SI->getPointerOperand();

  BasicBlock::iterator BI = SI;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
      // If the call is readnone, ignore it, otherwise bail out.  We don't
      // even allow readonly here because we don't want something like:
      //   A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (AA.getModRefBehavior(CallSite(BI)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;

      // TODO: If this is a memset, try to join it in.

      break;
    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
      break;

    // If this is a non-store instruction it is fine, ignore it.
    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
    if (NextStore == 0) continue;

    // If this is a store, see if we can merge it in.
    if (NextStore->isVolatile()) break;

    // Check to see if this stored value is of the same byte-splattable value.
    if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
      break;

    // Check to see if this store is to a constant offset from the start ptr.
    int64_t Offset;
    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                         *TD))
      break;

    Ranges.addStore(Offset, NextStore);
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return false;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addStore(0, SI);


  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  bool MadeChange = false;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.  We put
    // the memset right before the first instruction that isn't part of this
    // memset block.  This ensures that the memset is dominated by any
    // addressing instruction needed by the start of the block.
    BasicBlock::iterator InsertPt = BI;

    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      const Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }

    // Cast the start ptr to be i8* as memset requires.
    const PointerType *StartPTy = cast<PointerType>(StartPtr->getType());
    const PointerType *i8Ptr = Type::getInt8PtrTy(Context,
                                                  StartPTy->getAddressSpace());
    if (StartPTy != i8Ptr)
      StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
                                 InsertPt);

    Value *Ops[] = {
      StartPtr, ByteVal,               // Start, value
      // size
      ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
      // align
      ConstantInt::get(Type::getInt32Ty(Context), Alignment),
      // volatile
      ConstantInt::getFalse(Context),
    };
    const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };

    Function *MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);

    Value *C = CallInst::Create(MemSetF, Ops, Ops+5, "", InsertPt);
    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *C << '\n');
    C = C;  // Self-assignment keeps C "used" in release builds, where the
            // DEBUG block above compiles away.

    // Don't invalidate the iterator.
    BBI = BI;

    // Zap all the stores.
    for (SmallVector<StoreInst*, 16>::const_iterator
           SI = Range.TheStores.begin(),
           SE = Range.TheStores.end(); SI != SE; ++SI)
      (*SI)->eraseFromParent();
    ++NumMemSetInfer;
    MadeChange = true;
  }

  return MadeChange;
}

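// Illustrative IR for the call slot optimization below (example only; %T,
// @foo, and the value names are hypothetical):
//   %tmp = alloca %T
//   call void @foo(%T* sret %tmp)
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %tmp, i64 N, ...)
// When %tmp is used only by the call and the memcpy, the call can write into
// %dst directly:
//   call void @foo(%T* sret %dst)
// and the memcpy can be removed.
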
/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning
  // that the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning
  // considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check that it is no smaller than
    // srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the
  // final memcpy can be dropped), that it is not read or written between the
  // call and the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpyDest, srcSize) !=
      AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == cpyDest->getType())
        CS.setArgument(i, cpyDest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(),
                          C));
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

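// Illustrative IR for the memcpy-memcpy forwarding below (example only;
// the value names are hypothetical):
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %B, i8* %A, i64 64, ...)  ; MDep
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* %B, i64 64, ...)  ; M
// If %A is not written between the two calls, M can read from %A instead:
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* %A, i64 64, ...)
// which leaves the first memcpy exposed for dead store elimination.
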
/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
/// copy from MDep's input if we can.  MSize is the size of M's copy.
///
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // Second, the lengths of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1) return false;

  uint64_t DepSize = C1->getValue().getZExtValue();
  if (DepSize < MSize)
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // it would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination
  // "c", then we could still perform the xform by moving M up to the first
  // memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the
  // intermediate value, but we have to generate a memmove instead of memcpy.
  Intrinsic::ID ResultFn = Intrinsic::memcpy;
  if (!AA.isNoAlias(M->getRawDest(), MSize, MDep->getRawSource(), DepSize))
    ResultFn = Intrinsic::memmove;

  // If all checks passed, then we can transform M.
  const Type *ArgTys[3] = {
    M->getRawDest()->getType(),
    MDep->getRawSource()->getType(),
    M->getLength()->getType()
  };
  Function *MemCpyFun =
    Intrinsic::getDeclaration(MDep->getParent()->getParent()->getParent(),
                              ResultFn, ArgTys, 3);

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());
  Value *Args[5] = {
    M->getRawDest(),
    MDep->getRawSource(),
    M->getLength(),
    ConstantInt::get(Type::getInt32Ty(MemCpyFun->getContext()), Align),
    M->getVolatileCst()
  };
  CallInst::Create(MemCpyFun, Args, Args+5, "", M);

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// processMemCpy - perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize statically-sized memcpy's that are non-volatile.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (CopySize == 0 || M->isVolatile()) return false;

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform, which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult DepInfo = MD->getDependency(M);
  if (!DepInfo.isClobber())
    return false;

  if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst()))
    return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());

  if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
    bool changed = performCallSlotOptzn(M, M->getDest(), M->getSource(),
                                        CopySize->getZExtValue(), C);
    if (changed) M->eraseFromParent();
    return changed;
  }
  return false;
}

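// Illustrative IR for the memmove -> memcpy conversion below (example only;
// the value names are hypothetical).  When %dst and %src are distinct
// allocas, alias analysis proves NoAlias, so:
//   call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 32, ...)
// can be rewritten in place as:
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 32, ...)
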
/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // If the memmove is a constant size, use it for the alias query; this
  // allows us to optimize things like: memmove(P, P+64, 64);
  uint64_t MemMoveSize = AliasAnalysis::UnknownSize;
  if (ConstantInt *Len = dyn_cast<ConstantInt>(M->getLength()))
    MemMoveSize = Len->getZExtValue();

  // See if the pointers alias.
  if (AA.alias(M->getRawDest(), MemMoveSize, M->getRawSource(), MemMoveSize) !=
      AliasAnalysis::NoAlias)
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  const Type *ArgTys[3] = { M->getRawDest()->getType(),
                            M->getRawSource()->getType(),
                            M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys, 3));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

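// Illustrative IR for the byval forwarding below (example only; %T, @foo,
// and the value names are hypothetical):
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp, i8* %src, i64 N, ...)
//   call void @foo(%T* byval %tmp)
// If %src is not modified between the memcpy and the call, and the
// alignments line up, the temporary can be bypassed:
//   call void @foo(%T* byval %src)
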
/// processByValArgument - This is called on every byval argument in call
/// sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  // Find out what feeds this byval argument.  Note that we want the size of
  // the pointee, not of the pointer itself.
  Value *ByValArg = CS.getArgument(ArgNo);
  const Type *ByValTy =
    cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can pass the source of the memcpy byval instead of
  // the result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (MDep == 0 || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be at least the size of the byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If it is greater than the memcpy, then
  // we can't do the substitution.  If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0 || MDep->getAlignment() < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy
  // and the byval call.
  //   memcpy(a <- b)
  //   *b = 42;
  //   foo(*a)
  // It would be invalid to transform foo(*a) into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(),
                                 MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I)) {
        RepeatInstruction = processMemCpy(M);
      } else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) {
        RepeatInstruction = processMemMove(M);
      } else if (CallSite CS = (Value*)I) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.paramHasAttr(i+1, Attribute::ByVal))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for
// a function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = 0;
  return MadeChange;
}
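// Usage note (an addition to this listing, not part of the original file):
// the pass is registered as "memcpyopt" above, so with an opt binary from
// this era it can be exercised in isolation on a .ll file, e.g.:
//   opt -memcpyopt -stats -S input.ll
// where -stats prints the STATISTIC counters declared at the top of the file.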