MemCpyOptimizer.cpp revision cb33fd17cce475a1d47b2695e311b6934ad0ef86
//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");

/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with. This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V) {
  LLVMContext &Context = V->getContext();

  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType() == Type::getInt8Ty(Context)) return V;

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(Context));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(Context));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2 = Val.lshr(NextWidth);
        Val2.trunc(Val.getBitWidth()/2);
        Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(Context, Val);
    }
  }
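
  // As a worked illustration of the decomposition above: i32 0xF0F0F0F0
  // splits into halves 0xF0F0/0xF0F0 and then 0xF0/0xF0, so the loop returns
  // the i8 value 0xF0, whereas i16 0x1234 fails the first comparison
  // (0x12 != 0x34) and returns null.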

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}

static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, TargetData &TD) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector. Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr1 is provably equal to Ptr2 plus a
/// constant offset, and return that constant offset. For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            TargetData &TD) {
  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base. After that base, they may have some number of common (and
  // potentially variable) indices. Beyond those, each may have a run of
  // constant indices, which determines their offset from each other. We
  // handle no other case at this point.
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  bool VariableIdxFound = false;
  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}
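
// As a hypothetical IR-level example of the above: given
//   %p1 = getelementptr {i32, [8 x i16]}* %B, i64 0, i32 1, i64 3
//   %p2 = getelementptr {i32, [8 x i16]}* %B, i64 0, i32 1, i64 1
// the leading operands match, so only the trailing i16 indices are measured:
// Offset1 == 6, Offset2 == 2, and IsPointerOffset(%p1, %p2, ...) returns true
// with Offset == -4.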

/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A half-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<StoreInst*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;

};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found 8 or more stores to merge, or 64 or more bytes to set, use
  // memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() <= 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost
  // always. However, merging 2 32-bit stores isn't useful on a 32-bit
  // architecture (the memset will be split into 2 32-bit stores anyway) and
  // doing so can pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored. If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume that the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
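  //
  // For instance (assuming a target with 4-byte pointers): four i8 stores
  // spanning 4 bytes give NumPointerStores == 1 and NumByteStores == 0, and
  // 4 > 1, so we use memset; three i16 stores spanning 6 bytes give
  // 3 > 1 + 2 == false, so they are left as individual stores.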
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges. We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  TargetData &TD;
public:
  MemsetRanges(TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI);
};

} // end anon namespace


/// addStore - Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());

  // Do a linear search of the ranges to see if this can be joined and/or to
  // find the insertion point in the list. We keep the ranges sorted for
  // simplicity here. This is a linear search of a linked list, which is ugly;
  // however, the number of ranges is limited, so this won't get crazy slow.
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start     = Start;
    R.End       = End;
    R.StartPtr  = SI->getPointerOperand();
    R.Alignment = SI->getAlignment();
    R.TheStores.push_back(SI);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(SI);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = SI->getPointerOperand();
    I->Alignment = SI->getAlignment();
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(&ID) {}

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(MemCpyInst *cpy, CallInst *C);
    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

static RegisterPass<MemCpyOpt> X("memcpyopt",
                                 "MemCpy Optimization");



/// processStore - While scanning forward over instructions, we look for
/// patterns to fold away. In particular, this looks for stores to neighboring
/// locations of memory. If it sees enough consecutive ones (currently 4) it
/// attempts to merge them together into a memcpy/memset.
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  LLVMContext &Context = SI->getContext();

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.
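
  // For example (a sketch), given four adjacent byte stores of zero:
  //   store i8 0, i8* %P0    ; %P1..%P3 being %P0+1..%P0+3
  //   store i8 0, i8* %P1
  //   store i8 0, i8* %P2
  //   store i8 0, i8* %P3
  // the scan below collects them into one range and, when profitable,
  // replaces them with a single call such as
  //   call void @llvm.memset.i64(i8* %P0, i8 0, i64 4, i32 1)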

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  Value *ByteVal = isBytewiseValue(SI->getOperand(0));
  if (!ByteVal)
    return false;

  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  Module *M = SI->getParent()->getParent()->getParent();

  // Okay, so we now have a single store of a splatable value. Scan to find
  // all subsequent stores of the same value at constant offsets from the same
  // pointer. Join these together into ranges, so we can decide whether
  // contiguous blocks are stored.
  MemsetRanges Ranges(*TD);

  Value *StartPtr = SI->getPointerOperand();

  BasicBlock::iterator BI = SI;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
      // If the call is readnone, ignore it, otherwise bail out. We don't even
      // allow readonly here because we don't want something like:
      //   A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (AA.getModRefBehavior(CallSite::get(BI)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;

      // TODO: If this is a memset, try to join it in.

      break;
    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
      break;

    // If this is a non-store instruction it is fine, ignore it.
    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
    if (NextStore == 0) continue;

    // If this is a store, see if we can merge it in.
    if (NextStore->isVolatile()) break;

    // Check to see if this stored value is of the same byte-splattable value.
    if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
      break;

    // Check to see if this store is to a constant offset from the start ptr.
    int64_t Offset;
    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
      break;

    Ranges.addStore(Offset, NextStore);
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return false;

  // If we had at least one store that could be merged in, add the starting
  // store as well. As a small compile-time optimization, we avoid doing this
  // unless there is at least something interesting to merge with.
  Ranges.addStore(0, SI);

  Function *MemSetF = 0;

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  bool MadeChange = false;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this! Create a new memset. We put
    // the memset right before the first instruction that isn't part of this
    // memset block. This ensures that the memset is dominated by any
    // addressing instruction needed by the start of the block.
    BasicBlock::iterator InsertPt = BI;

    if (MemSetF == 0) {
      const Type *Ty = Type::getInt64Ty(Context);
      MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, &Ty, 1);
    }

    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Cast the start ptr to be i8* as memset requires.
    const Type *i8Ptr = Type::getInt8PtrTy(Context);
    if (StartPtr->getType() != i8Ptr)
      StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
                                 InsertPt);

    Value *Ops[] = {
      StartPtr, ByteVal,                        // Start, value
      // size
      ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
      // align
      ConstantInt::get(Type::getInt32Ty(Context), Range.Alignment)
    };
    Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i];
          dbgs() << "With: " << *C);
    (void)C;  // C is only used by DEBUG; keep -Asserts builds warning-free.

    // Don't invalidate the iterator.
    BBI = BI;

    // Zap all the stores.
    for (SmallVector<StoreInst*, 16>::const_iterator
           SI = Range.TheStores.begin(),
           SE = Range.TheStores.end(); SI != SE; ++SI)
      (*SI)->eraseFromParent();
    ++NumMemSetInfer;
    MadeChange = true;
  }

  return MadeChange;
}


/// performCallSlotOptzn - Takes a memcpy and a call that it depends on, and
/// checks for the possibility of a call slot optimization by having the call
/// write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.
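  //
  // Concretely (a sketch; @producer and %T are hypothetical):
  //   %tmp = alloca %T
  //   call void @producer(%T* sret %tmp)
  //   call void @llvm.memcpy.i64(i8* %destp, i8* %tmpp, i64 N, i32 A)
  // becomes a call of @producer on the destination directly, after which the
  // memcpy (and often %tmp itself) is dead.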

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  Value *cpyDest = cpy->getDest();
  Value *cpySrc = cpy->getSource();
  CallSite CS = CallSite::get(C);

  // We need to be able to reason about the size of the memcpy, so we require
  // that it be a constant.
  ConstantInt *cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
  if (!cpyLength)
    return false;

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLength->getZExtValue() < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check that it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.back();
    srcUseList.pop_back();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
        AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == cpyDest->getType())
        CS.setArgument(i, cpyDest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
  MD.removeInstruction(C);

  // Remove the memcpy.
  MD.removeInstruction(cpy);
  cpy->eraseFromParent();
  NumMemCpyInstr++;

  return true;
}

/// processMemCpy - Perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
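/// For example (a sketch):
///   call void @llvm.memcpy.i64(i8* %Y, i8* %X, i64 40, i32 4)   ; A
///   call void @llvm.memcpy.i64(i8* %Z, i8* %Y, i64 40, i32 4)   ; B
/// Here B is rewritten to copy from %X instead of %Y, leaving A dead if %Y
/// has no other uses.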
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult dep = MD.getDependency(M);
  if (!dep.isClobber())
    return false;
  if (!isa<MemCpyInst>(dep.getInst())) {
    if (CallInst *C = dyn_cast<CallInst>(dep.getInst()))
      return performCallSlotOptzn(M, C);
    return false;
  }

  MemCpyInst *MDep = cast<MemCpyInst>(dep.getInst());

  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest())
    return false;

  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *C2 = dyn_cast<ConstantInt>(M->getLength());
  if (!C1 || !C2)
    return false;

  uint64_t DepSize = C1->getValue().getZExtValue();
  uint64_t CpySize = C2->getValue().getZExtValue();

  if (DepSize < CpySize)
    return false;

  // Finally, we have to make sure that the dest of the second does not
  // alias the source of the first.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
      AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
           AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
           != AliasAnalysis::NoAlias)
    return false;

  // If all checks passed, then we can transform these memcpy's.
  const Type *Ty = M->getLength()->getType();
  Function *MemCpyFun = Intrinsic::getDeclaration(
                                 M->getParent()->getParent()->getParent(),
                                 M->getIntrinsicID(), &Ty, 1);

  Value *Args[4] = {
    M->getRawDest(), MDep->getRawSource(), M->getLength(), M->getAlignmentCst()
  };

  CallInst *C = CallInst::Create(MemCpyFun, Args, Args+4, "", M);

  // If C and M don't interfere, then this is a valid transformation. If they
  // did, this would mean that the two sources overlap, which would be bad.
  if (MD.getDependency(C) == dep) {
    MD.removeInstruction(M);
    M->eraseFromParent();
    NumMemCpyInstr++;
    return true;
  }

  // Otherwise, there was no point in doing this, so we remove the call we
  // inserted and act like nothing happened.
  MD.removeInstruction(C);
  C->eraseFromParent();
  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
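/// For example, when AA proves %dst and %src cannot alias:
///   call void @llvm.memmove.i64(i8* %dst, i8* %src, i64 64, i32 1)
/// is rewritten in place into the equivalent llvm.memcpy call.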
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // If the memmove is a constant size, use it for the alias query; this allows
  // us to optimize things like: memmove(P, P+64, 64);
  uint64_t MemMoveSize = ~0ULL;
  if (ConstantInt *Len = dyn_cast<ConstantInt>(M->getLength()))
    MemMoveSize = Len->getZExtValue();

  // See if the pointers alias.
  if (AA.alias(M->getRawDest(), MemMoveSize, M->getRawSource(), MemMoveSize) !=
      AliasAnalysis::NoAlias)
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  const Type *Ty = M->getLength()->getType();
  M->setOperand(0, Intrinsic::getDeclaration(Mod, Intrinsic::memcpy, &Ty, 1));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  getAnalysis<MemoryDependenceAnalysis>().removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}


// MemCpyOpt::iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
         BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        MadeChange |= processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) {
        if (processMemMove(M)) {
          --BI;  // Reprocess the new memcpy.
          MadeChange = true;
        }
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  return MadeChange;
}
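
// Usage sketch (assuming an opt binary built from this revision):
//   opt -memcpyopt -stats input.ll -S -o output.ll
// where -stats prints the NumMemSetInfer/NumMemCpyInstr/NumMoveToCpy
// counters declared at the top of this file.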