MemCpyOptimizer.cpp revision baf3c404409d5e47b13984a7f95bfbd6d1f2e79e
//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/CallSite.h"     // CallSite::get
#include "llvm/Support/Compiler.h"     // VISIBILITY_HIDDEN
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"   // isPowerOf2_32
#include "llvm/Support/Streams.h"      // cerr
#include "llvm/Target/TargetData.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");

/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with. This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0, etc. If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V, LLVMContext &Context) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType() == Type::Int8Ty) return V;

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType() == Type::FloatTy)
      V = ConstantExpr::getBitCast(CFP, Type::Int32Ty);
    if (CFP->getType() == Type::DoubleTy)
      V = ConstantExpr::getBitCast(CFP, Type::Int64Ty);
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2 = Val.lshr(NextWidth);
        Val2.trunc(Val.getBitWidth()/2);
        Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(Context, Val);
    }
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}
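// Worked example for isBytewiseValue (illustrative, not part of the original
// source): for i16 0xF0F0 the loop splits the value into halves 0xF0 and
// 0xF0; they match, the width reaches 8 bits, and the i8 value 0xF0 is
// returned. For i16 0x1234 the halves 0x12 and 0x34 differ, so the function
// returns null and the store cannot feed a memset.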
/// GetOffsetFromIndex - Compute the constant byte offset implied by GEP
/// indices Idx and onward, setting VariableIdxFound if any of those indices
/// is not a constant (in which case the returned offset is meaningless).
static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, TargetData &TD) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;  // Return value is meaningless here.
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector. Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset. For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40]. In this case the offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            TargetData &TD) {
  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base. After that base, they may have some number of common (and
  // potentially variable) indices. After that they handle some constant
  // offset, which determines their offset from each other. At this point, we
  // handle no other case.
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  bool VariableIdxFound = false;
  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}
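// Illustrative example for the two helpers above (assumed IR):
//   %p1 = getelementptr [100 x i32]* %A, i32 0, i32 42
//   %p2 = getelementptr [100 x i32]* %A, i32 0, i32 40
// IsPointerOffset skips the shared %A base and the common zero index;
// GetOffsetFromIndex then computes 42*4 = 168 for %p1 and 40*4 = 160 for
// %p2, so the call succeeds with Offset = 160 - 168 = -8, i.e. %p2 points
// eight bytes before %p1.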
/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [3, 4). The fourth store joins the
/// two ranges into [0, 4), which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<StoreInst*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found 8 or more stores to merge, or 64 or more bytes, use memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() <= 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost
  // always. However, merging 2 32-bit stores isn't useful on a 32-bit
  // architecture (the memset will be split into 2 32-bit stores anyway) and
  // doing so can pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored. If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes, if any, are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32,
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}
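// Worked example of the heuristic above (illustrative): on a target with
// 4-byte pointers, five adjacent i8 stores of the same byte give Bytes = 5,
// NumPointerStores = 1, and NumByteStores = 1. Since 5 > 1 + 1, the range is
// deemed profitable. A range of only two i32 stores, by contrast, is
// rejected by the size() <= 2 check before this computation is ever reached.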
namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges. We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  TargetData &TD;
public:
  MemsetRanges(TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI);
};

} // end anon namespace


/// addStore - Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());

  // Do a linear search of the ranges to see if this can be joined and/or to
  // find the insertion point in the list. We keep the ranges sorted for
  // simplicity here. This is a linear search of a linked list, which is ugly;
  // however, the number of ranges is limited, so this won't get crazy slow.
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start     = Start;
    R.End       = End;
    R.StartPtr  = SI->getPointerOperand();
    R.Alignment = SI->getAlignment();
    R.TheStores.push_back(SI);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(SI);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start, so the store
  // overlaps the range but is not entirely contained within it.

  // See if this store extends the start of the range. In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = SI->getPointerOperand();
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I) and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}
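// Illustrative trace of addStore on the sequence from the MemsetRange
// comment (byte stores at offsets 1, 0, 3, 2, arriving in that order):
//   offset 1: no range exists, so [1, 2) is inserted.
//   offset 0: Start < I->Start for [1, 2), so the range grows to [0, 2).
//   offset 3: Start(3) > End(2) of [0, 2), so a new range [3, 4) is inserted.
//   offset 2: [2, 3) touches the end of [0, 2); End grows to 3, and the
//             coalescing loop then folds in [3, 4), leaving a single [0, 4).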
//===----------------------------------------------------------------------===//
//                              MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {

  class VISIBILITY_HIDDEN MemCpyOpt : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(&ID) {}

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetData>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
      AU.addPreserved<TargetData>();
    }

    // Helper functions.
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool performCallSlotOptzn(MemCpyInst *cpy, CallInst *C);
    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

static RegisterPass<MemCpyOpt> X("memcpyopt",
                                 "MemCpy Optimization");


/// processStore - When scanning forward over instructions, we look for some
/// other patterns to fold away. In particular, this looks for stores to
/// neighboring locations of memory. If it sees enough consecutive ones
/// (currently 4) it attempts to merge them together into a memcpy/memset.
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  Value *ByteVal = isBytewiseValue(SI->getOperand(0), SI->getContext());
  if (!ByteVal)
    return false;

  TargetData &TD = getAnalysis<TargetData>();
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  LLVMContext &Context = SI->getContext();
  Module *M = SI->getParent()->getParent()->getParent();

  // Okay, so we now have a single store with a splatable value. Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous
  // blocks are stored.
  MemsetRanges Ranges(TD);

  Value *StartPtr = SI->getPointerOperand();

  BasicBlock::iterator BI = SI;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
      // If the call is readnone, ignore it, otherwise bail out. We don't even
      // allow readonly here because we don't want something like:
      //   A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (AA.getModRefBehavior(CallSite::get(BI)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;

      // TODO: If this is a memset, try to join it in.

      break;
    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
      break;

    // If this is a non-store instruction it is fine, ignore it.
    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
    if (NextStore == 0) continue;

    // If this is a store, see if we can merge it in.
    if (NextStore->isVolatile()) break;

    // Check to see if this stored value is of the same byte-splattable value.
    if (ByteVal != isBytewiseValue(NextStore->getOperand(0),
                                   NextStore->getContext()))
      break;

    // Check to see if this store is to a constant offset from the start ptr.
    int64_t Offset;
    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, TD))
      break;

    Ranges.addStore(Offset, NextStore);
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return false;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addStore(0, SI);


  Function *MemSetF = 0;

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  bool MadeChange = false;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(TD))
      continue;

    // Otherwise, we do want to transform this! Create a new memset. We put
    // the memset right before the first instruction that isn't part of this
    // memset block. This ensures that the memset is dominated by any
    // addressing instruction needed by the start of the block.
    BasicBlock::iterator InsertPt = BI;

    if (MemSetF == 0) {
      const Type *Tys[] = {Type::Int64Ty};
      MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 1);
    }

    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Cast the start ptr to be i8* as memset requires.
    const Type *i8Ptr = Context.getPointerTypeUnqual(Type::Int8Ty);
    if (StartPtr->getType() != i8Ptr)
      StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
                                 InsertPt);

    Value *Ops[] = {
      StartPtr, ByteVal,                                       // Start, value
      ConstantInt::get(Type::Int64Ty, Range.End-Range.Start),  // size
      ConstantInt::get(Type::Int32Ty, Range.Alignment)         // align
    };
    Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
    DEBUG(cerr << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            cerr << *Range.TheStores[i];
          cerr << "With: " << *C);
    C=C;  // Silence the unused-variable warning in release builds.

    // Don't invalidate the iterator.
    BBI = BI;

    // Zap all the stores.
    for (SmallVector<StoreInst*, 16>::const_iterator
           SI = Range.TheStores.begin(),
           SE = Range.TheStores.end(); SI != SE; ++SI)
      (*SI)->eraseFromParent();
    ++NumMemSetInfer;
    MadeChange = true;
  }

  return MadeChange;
}
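// Illustrative end-to-end example for processStore (assumed IR; %p1..%p3 are
// GEPs at byte offsets 1..3 from %p0). Four adjacent byte stores of zero:
//   store i8 0, i8* %p0
//   store i8 0, i8* %p1
//   store i8 0, i8* %p2
//   store i8 0, i8* %p3
// are collected into the single range [0, 4) and, once judged profitable,
// replaced by one intrinsic call (alignment taken from the first store):
//   call void @llvm.memset.i64(i8* %p0, i8 0, i64 4, i32 1)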
/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning
  // that the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  Value *cpyDest = cpy->getDest();
  Value *cpySrc = cpy->getSource();
  CallSite CS = CallSite::get(C);

  // We need to be able to reason about the size of the memcpy, so we require
  // that it be a constant.
  ConstantInt *cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
  if (!cpyLength)
    return false;

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  TargetData &TD = getAnalysis<TargetData>();

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD.getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLength->getZExtValue() < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check that it is at least srcSize bytes.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD.getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD.getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the
  // final memcpy can be dropped), that it is not read or written between the
  // call and the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.back();
    srcUseList.pop_back();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
        AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() != cpyDest->getType())
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(),
                          C));
      else
        CS.setArgument(i, cpyDest);
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
  MD.removeInstruction(C);

  // Remove the memcpy.
  MD.removeInstruction(cpy);
  cpy->eraseFromParent();
  ++NumMemCpyInstr;

  return true;
}
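// Illustrative example of the call slot optimization (assumed IR with a
// hypothetical callee @f): given
//   %tmp = alloca %T
//   call void @f(%T* %tmp)        ; fills in %tmp
//   ; ... memcpy of all of %tmp into %dest ...
// the checks above establish that %tmp is untouched apart from the call and
// the memcpy, and that %dest is large enough, dominates the call, and is not
// otherwise accessed by @f. The call is then rewritten to @f(%T* %dest) and
// the memcpy is deleted.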
/// processMemCpy - perform simplification of memcpys. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();

  // There are two possible optimizations we can do for memcpy:
  //   a) the memcpy-memcpy xform, which exposes redundancy for DSE
  //   b) the call-memcpy xform for return slot optimization
  MemDepResult dep = MD.getDependency(M);
  if (!dep.isClobber())
    return false;
  if (!isa<MemCpyInst>(dep.getInst())) {
    if (CallInst *C = dyn_cast<CallInst>(dep.getInst()))
      return performCallSlotOptzn(M, C);
    return false;
  }

  MemCpyInst *MDep = cast<MemCpyInst>(dep.getInst());

  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest())
    return false;

  // Second, the lengths of the memcpys must be the same, or the preceding one
  // must be at least as large as the following one.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *C2 = dyn_cast<ConstantInt>(M->getLength());
  if (!C1 || !C2)
    return false;

  uint64_t DepSize = C1->getValue().getZExtValue();
  uint64_t CpySize = C2->getValue().getZExtValue();

  if (DepSize < CpySize)
    return false;

  // Finally, we have to make sure that the dest of the second does not
  // alias the source of the first.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
      AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
           AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
             != AliasAnalysis::NoAlias)
    return false;

  // If all checks passed, then we can transform these memcpys.
  const Type *Tys[1];
  Tys[0] = M->getLength()->getType();
  Function *MemCpyFun = Intrinsic::getDeclaration(
                                 M->getParent()->getParent()->getParent(),
                                 M->getIntrinsicID(), Tys, 1);

  Value *Args[4] = {
    M->getRawDest(), MDep->getRawSource(), M->getLength(), M->getAlignmentCst()
  };

  CallInst *C = CallInst::Create(MemCpyFun, Args, Args+4, "", M);

  // If C and M don't interfere, then this is a valid transformation. If they
  // did, this would mean that the two sources overlap, which would be bad.
  if (MD.getDependency(C) == dep) {
    MD.removeInstruction(M);
    M->eraseFromParent();
    ++NumMemCpyInstr;
    return true;
  }

  // Otherwise, there was no point in doing this, so we remove the call we
  // inserted and act like nothing happened.
  MD.removeInstruction(C);
  C->eraseFromParent();
  return false;
}
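// Illustrative example of the memcpy-memcpy transform (assumed IR):
//   call void @llvm.memcpy.i64(i8* %B, i8* %A, i64 64, i32 8)  ; B <- A
//   call void @llvm.memcpy.i64(i8* %C, i8* %B, i64 64, i32 8)  ; C <- B
// When %A, %B, and %C are pairwise NoAlias, the second call is rewritten to
//   call void @llvm.memcpy.i64(i8* %C, i8* %A, i64 64, i32 8)  ; C <- A
// which leaves the first memcpy (and possibly %B itself) dead for later
// passes such as DSE to clean up.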
// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool changed = false;
  bool shouldContinue = true;

  // Iterate to a fixed point, since one transformation can expose
  // opportunities for another.
  while (shouldContinue) {
    shouldContinue = iterateOnFunction(F);
    changed |= shouldContinue;
  }

  return changed;
}
// MemCpyOpt::iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool changed_function = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
         BI != BE;) {
      // Advance the iterator first to avoid invalidating it.
      Instruction *I = BI++;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        changed_function |= processStore(SI, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        changed_function |= processMemCpy(M);
    }
  }

  return changed_function;
}
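// Usage note (not part of the original file): the pass is registered above
// under the name "memcpyopt", so it can be exercised in isolation with
// something like:
//   opt -memcpyopt -stats input.bc -o output.bc
// where -stats prints the NumMemCpyInstr/NumMemSetInfer counters declared at
// the top of this file.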