ScalarReplAggregates.cpp revision 704d1347c5009f674408fae6f78343b415891274
1//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This transformation implements the well known scalar replacement of 11// aggregates transformation. This xform breaks up alloca instructions of 12// aggregate type (structure or array) into individual alloca instructions for 13// each member (if possible). Then, if possible, it transforms the individual 14// alloca instructions into nice clean scalar SSA form. 15// 16// This combines a simple SRoA algorithm with the Mem2Reg algorithm because 17// often interact, especially for C++ programs. As such, iterating between 18// SRoA, then Mem2Reg until we run out of things to promote works well. 19// 20//===----------------------------------------------------------------------===// 21 22#define DEBUG_TYPE "scalarrepl" 23#include "llvm/Transforms/Scalar.h" 24#include "llvm/Constants.h" 25#include "llvm/DerivedTypes.h" 26#include "llvm/Function.h" 27#include "llvm/GlobalVariable.h" 28#include "llvm/Instructions.h" 29#include "llvm/IntrinsicInst.h" 30#include "llvm/LLVMContext.h" 31#include "llvm/Module.h" 32#include "llvm/Pass.h" 33#include "llvm/Analysis/DominanceFrontier.h" 34#include "llvm/Analysis/ValueTracking.h" 35#include "llvm/Target/TargetData.h" 36#include "llvm/Transforms/Utils/PromoteMemToReg.h" 37#include "llvm/Transforms/Utils/Local.h" 38#include "llvm/Support/CallSite.h" 39#include "llvm/Support/Debug.h" 40#include "llvm/Support/ErrorHandling.h" 41#include "llvm/Support/GetElementPtrTypeIterator.h" 42#include "llvm/Support/IRBuilder.h" 43#include "llvm/Support/MathExtras.h" 44#include "llvm/Support/raw_ostream.h" 45#include "llvm/ADT/SmallVector.h" 46#include "llvm/ADT/Statistic.h" 47using namespace llvm; 48 49STATISTIC(NumReplaced, "Number of allocas broken up"); 50STATISTIC(NumPromoted, "Number of allocas promoted"); 51STATISTIC(NumConverted, "Number of aggregates converted to scalar"); 52STATISTIC(NumGlobals, "Number of allocas copied from constant global"); 53 54namespace { 55 struct SROA : public FunctionPass { 56 static char ID; // Pass identification, replacement for typeid 57 explicit SROA(signed T = -1) : FunctionPass(ID) { 58 initializeSROAPass(*PassRegistry::getPassRegistry()); 59 if (T == -1) 60 SRThreshold = 128; 61 else 62 SRThreshold = T; 63 } 64 65 bool runOnFunction(Function &F); 66 67 bool performScalarRepl(Function &F); 68 bool performPromotion(Function &F); 69 70 // getAnalysisUsage - This pass does not require any passes, but we know it 71 // will not alter the CFG, so say so. 72 virtual void getAnalysisUsage(AnalysisUsage &AU) const { 73 AU.addRequired<DominatorTree>(); 74 AU.addRequired<DominanceFrontier>(); 75 AU.setPreservesCFG(); 76 } 77 78 private: 79 TargetData *TD; 80 81 /// DeadInsts - Keep track of instructions we have made dead, so that 82 /// we can remove them after we are done working. 83 SmallVector<Value*, 32> DeadInsts; 84 85 /// AllocaInfo - When analyzing uses of an alloca instruction, this captures 86 /// information about the uses. All these fields are initialized to false 87 /// and set to true when something is learned. 88 struct AllocaInfo { 89 /// isUnsafe - This is set to true if the alloca cannot be SROA'd. 90 bool isUnsafe : 1; 91 92 /// isMemCpySrc - This is true if this aggregate is memcpy'd from. 93 bool isMemCpySrc : 1; 94 95 /// isMemCpyDst - This is true if this aggregate is memcpy'd into. 96 bool isMemCpyDst : 1; 97 98 AllocaInfo() 99 : isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false) {} 100 }; 101 102 unsigned SRThreshold; 103 104 void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; } 105 106 bool isSafeAllocaToScalarRepl(AllocaInst *AI); 107 108 void isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset, 109 AllocaInfo &Info); 110 void isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t &Offset, 111 AllocaInfo &Info); 112 void isSafeMemAccess(AllocaInst *AI, uint64_t Offset, uint64_t MemSize, 113 const Type *MemOpType, bool isStore, AllocaInfo &Info); 114 bool TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size); 115 uint64_t FindElementAndOffset(const Type *&T, uint64_t &Offset, 116 const Type *&IdxTy); 117 118 void DoScalarReplacement(AllocaInst *AI, 119 std::vector<AllocaInst*> &WorkList); 120 void DeleteDeadInstructions(); 121 122 void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset, 123 SmallVector<AllocaInst*, 32> &NewElts); 124 void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset, 125 SmallVector<AllocaInst*, 32> &NewElts); 126 void RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset, 127 SmallVector<AllocaInst*, 32> &NewElts); 128 void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst, 129 AllocaInst *AI, 130 SmallVector<AllocaInst*, 32> &NewElts); 131 void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI, 132 SmallVector<AllocaInst*, 32> &NewElts); 133 void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI, 134 SmallVector<AllocaInst*, 32> &NewElts); 135 136 static MemTransferInst *isOnlyCopiedFromConstantGlobal(AllocaInst *AI); 137 }; 138} 139 140char SROA::ID = 0; 141INITIALIZE_PASS_BEGIN(SROA, "scalarrepl", 142 "Scalar Replacement of Aggregates", false, false) 143INITIALIZE_PASS_DEPENDENCY(DominatorTree) 144INITIALIZE_PASS_DEPENDENCY(DominanceFrontier) 145INITIALIZE_PASS_END(SROA, "scalarrepl", 146 "Scalar Replacement of Aggregates", false, false) 147 148// Public interface to the ScalarReplAggregates pass 149FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) { 150 return new SROA(Threshold); 151} 152 153 154//===----------------------------------------------------------------------===// 155// Convert To Scalar Optimization. 156//===----------------------------------------------------------------------===// 157 158namespace { 159/// ConvertToScalarInfo - This class implements the "Convert To Scalar" 160/// optimization, which scans the uses of an alloca and determines if it can 161/// rewrite it in terms of a single new alloca that can be mem2reg'd. 162class ConvertToScalarInfo { 163 /// AllocaSize - The size of the alloca being considered. 164 unsigned AllocaSize; 165 const TargetData &TD; 166 167 /// IsNotTrivial - This is set to true if there is some access to the object 168 /// which means that mem2reg can't promote it. 169 bool IsNotTrivial; 170 171 /// VectorTy - This tracks the type that we should promote the vector to if 172 /// it is possible to turn it into a vector. This starts out null, and if it 173 /// isn't possible to turn into a vector type, it gets set to VoidTy. 174 const Type *VectorTy; 175 176 /// HadAVector - True if there is at least one vector access to the alloca. 177 /// We don't want to turn random arrays into vectors and use vector element 178 /// insert/extract, but if there are element accesses to something that is 179 /// also declared as a vector, we do want to promote to a vector. 180 bool HadAVector; 181 182public: 183 explicit ConvertToScalarInfo(unsigned Size, const TargetData &td) 184 : AllocaSize(Size), TD(td) { 185 IsNotTrivial = false; 186 VectorTy = 0; 187 HadAVector = false; 188 } 189 190 AllocaInst *TryConvert(AllocaInst *AI); 191 192private: 193 bool CanConvertToScalar(Value *V, uint64_t Offset); 194 void MergeInType(const Type *In, uint64_t Offset); 195 void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset); 196 197 Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType, 198 uint64_t Offset, IRBuilder<> &Builder); 199 Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal, 200 uint64_t Offset, IRBuilder<> &Builder); 201}; 202} // end anonymous namespace. 203 204 205/// IsVerbotenVectorType - Return true if this is a vector type ScalarRepl isn't 206/// allowed to form. We do this to avoid MMX types, which is a complete hack, 207/// but is required until the backend is fixed. 208static bool IsVerbotenVectorType(const VectorType *VTy, const Instruction *I) { 209 StringRef Triple(I->getParent()->getParent()->getParent()->getTargetTriple()); 210 if (!Triple.startswith("i386") && 211 !Triple.startswith("x86_64")) 212 return false; 213 214 // Reject all the MMX vector types. 215 switch (VTy->getNumElements()) { 216 default: return false; 217 case 1: return VTy->getElementType()->isIntegerTy(64); 218 case 2: return VTy->getElementType()->isIntegerTy(32); 219 case 4: return VTy->getElementType()->isIntegerTy(16); 220 case 8: return VTy->getElementType()->isIntegerTy(8); 221 } 222} 223 224 225/// TryConvert - Analyze the specified alloca, and if it is safe to do so, 226/// rewrite it to be a new alloca which is mem2reg'able. This returns the new 227/// alloca if possible or null if not. 228AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) { 229 // If we can't convert this scalar, or if mem2reg can trivially do it, bail 230 // out. 231 if (!CanConvertToScalar(AI, 0) || !IsNotTrivial) 232 return 0; 233 234 // If we were able to find a vector type that can handle this with 235 // insert/extract elements, and if there was at least one use that had 236 // a vector type, promote this to a vector. We don't want to promote 237 // random stuff that doesn't use vectors (e.g. <9 x double>) because then 238 // we just get a lot of insert/extracts. If at least one vector is 239 // involved, then we probably really do have a union of vector/array. 240 const Type *NewTy; 241 if (VectorTy && VectorTy->isVectorTy() && HadAVector && 242 !IsVerbotenVectorType(cast<VectorType>(VectorTy), AI)) { 243 DEBUG(dbgs() << "CONVERT TO VECTOR: " << *AI << "\n TYPE = " 244 << *VectorTy << '\n'); 245 NewTy = VectorTy; // Use the vector type. 246 } else { 247 DEBUG(dbgs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n"); 248 // Create and insert the integer alloca. 249 NewTy = IntegerType::get(AI->getContext(), AllocaSize*8); 250 } 251 AllocaInst *NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin()); 252 ConvertUsesToScalar(AI, NewAI, 0); 253 return NewAI; 254} 255 256/// MergeInType - Add the 'In' type to the accumulated vector type (VectorTy) 257/// so far at the offset specified by Offset (which is specified in bytes). 258/// 259/// There are two cases we handle here: 260/// 1) A union of vector types of the same size and potentially its elements. 261/// Here we turn element accesses into insert/extract element operations. 262/// This promotes a <4 x float> with a store of float to the third element 263/// into a <4 x float> that uses insert element. 264/// 2) A fully general blob of memory, which we turn into some (potentially 265/// large) integer type with extract and insert operations where the loads 266/// and stores would mutate the memory. We mark this by setting VectorTy 267/// to VoidTy. 268void ConvertToScalarInfo::MergeInType(const Type *In, uint64_t Offset) { 269 // If we already decided to turn this into a blob of integer memory, there is 270 // nothing to be done. 271 if (VectorTy && VectorTy->isVoidTy()) 272 return; 273 274 // If this could be contributing to a vector, analyze it. 275 276 // If the In type is a vector that is the same size as the alloca, see if it 277 // matches the existing VecTy. 278 if (const VectorType *VInTy = dyn_cast<VectorType>(In)) { 279 // Remember if we saw a vector type. 280 HadAVector = true; 281 282 if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) { 283 // If we're storing/loading a vector of the right size, allow it as a 284 // vector. If this the first vector we see, remember the type so that 285 // we know the element size. If this is a subsequent access, ignore it 286 // even if it is a differing type but the same size. Worst case we can 287 // bitcast the resultant vectors. 288 if (VectorTy == 0) 289 VectorTy = VInTy; 290 return; 291 } 292 } else if (In->isFloatTy() || In->isDoubleTy() || 293 (In->isIntegerTy() && In->getPrimitiveSizeInBits() >= 8 && 294 isPowerOf2_32(In->getPrimitiveSizeInBits()))) { 295 // If we're accessing something that could be an element of a vector, see 296 // if the implied vector agrees with what we already have and if Offset is 297 // compatible with it. 298 unsigned EltSize = In->getPrimitiveSizeInBits()/8; 299 if (Offset % EltSize == 0 && AllocaSize % EltSize == 0 && 300 (VectorTy == 0 || 301 cast<VectorType>(VectorTy)->getElementType() 302 ->getPrimitiveSizeInBits()/8 == EltSize)) { 303 if (VectorTy == 0) 304 VectorTy = VectorType::get(In, AllocaSize/EltSize); 305 return; 306 } 307 } 308 309 // Otherwise, we have a case that we can't handle with an optimized vector 310 // form. We can still turn this into a large integer. 311 VectorTy = Type::getVoidTy(In->getContext()); 312} 313 314/// CanConvertToScalar - V is a pointer. If we can convert the pointee and all 315/// its accesses to a single vector type, return true and set VecTy to 316/// the new type. If we could convert the alloca into a single promotable 317/// integer, return true but set VecTy to VoidTy. Further, if the use is not a 318/// completely trivial use that mem2reg could promote, set IsNotTrivial. Offset 319/// is the current offset from the base of the alloca being analyzed. 320/// 321/// If we see at least one access to the value that is as a vector type, set the 322/// SawVec flag. 323bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) { 324 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) { 325 Instruction *User = cast<Instruction>(*UI); 326 327 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 328 // Don't break volatile loads. 329 if (LI->isVolatile()) 330 return false; 331 // Don't touch MMX operations. 332 if (LI->getType()->isX86_MMXTy()) 333 return false; 334 MergeInType(LI->getType(), Offset); 335 continue; 336 } 337 338 if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 339 // Storing the pointer, not into the value? 340 if (SI->getOperand(0) == V || SI->isVolatile()) return false; 341 // Don't touch MMX operations. 342 if (SI->getOperand(0)->getType()->isX86_MMXTy()) 343 return false; 344 MergeInType(SI->getOperand(0)->getType(), Offset); 345 continue; 346 } 347 348 if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) { 349 IsNotTrivial = true; // Can't be mem2reg'd. 350 if (!CanConvertToScalar(BCI, Offset)) 351 return false; 352 continue; 353 } 354 355 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { 356 // If this is a GEP with a variable indices, we can't handle it. 357 if (!GEP->hasAllConstantIndices()) 358 return false; 359 360 // Compute the offset that this GEP adds to the pointer. 361 SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end()); 362 uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(), 363 &Indices[0], Indices.size()); 364 // See if all uses can be converted. 365 if (!CanConvertToScalar(GEP, Offset+GEPOffset)) 366 return false; 367 IsNotTrivial = true; // Can't be mem2reg'd. 368 continue; 369 } 370 371 // If this is a constant sized memset of a constant value (e.g. 0) we can 372 // handle it. 373 if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) { 374 // Store of constant value and constant size. 375 if (!isa<ConstantInt>(MSI->getValue()) || 376 !isa<ConstantInt>(MSI->getLength())) 377 return false; 378 IsNotTrivial = true; // Can't be mem2reg'd. 379 continue; 380 } 381 382 // If this is a memcpy or memmove into or out of the whole allocation, we 383 // can handle it like a load or store of the scalar type. 384 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) { 385 ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength()); 386 if (Len == 0 || Len->getZExtValue() != AllocaSize || Offset != 0) 387 return false; 388 389 IsNotTrivial = true; // Can't be mem2reg'd. 390 continue; 391 } 392 393 // Otherwise, we cannot handle this! 394 return false; 395 } 396 397 return true; 398} 399 400/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca 401/// directly. This happens when we are converting an "integer union" to a 402/// single integer scalar, or when we are converting a "vector union" to a 403/// vector with insert/extractelement instructions. 404/// 405/// Offset is an offset from the original alloca, in bits that need to be 406/// shifted to the right. By the end of this, there should be no uses of Ptr. 407void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, 408 uint64_t Offset) { 409 while (!Ptr->use_empty()) { 410 Instruction *User = cast<Instruction>(Ptr->use_back()); 411 412 if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) { 413 ConvertUsesToScalar(CI, NewAI, Offset); 414 CI->eraseFromParent(); 415 continue; 416 } 417 418 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { 419 // Compute the offset that this GEP adds to the pointer. 420 SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end()); 421 uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(), 422 &Indices[0], Indices.size()); 423 ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8); 424 GEP->eraseFromParent(); 425 continue; 426 } 427 428 IRBuilder<> Builder(User); 429 430 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 431 // The load is a bit extract from NewAI shifted right by Offset bits. 432 Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp"); 433 Value *NewLoadVal 434 = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder); 435 LI->replaceAllUsesWith(NewLoadVal); 436 LI->eraseFromParent(); 437 continue; 438 } 439 440 if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 441 assert(SI->getOperand(0) != Ptr && "Consistency error!"); 442 Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in"); 443 Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset, 444 Builder); 445 Builder.CreateStore(New, NewAI); 446 SI->eraseFromParent(); 447 448 // If the load we just inserted is now dead, then the inserted store 449 // overwrote the entire thing. 450 if (Old->use_empty()) 451 Old->eraseFromParent(); 452 continue; 453 } 454 455 // If this is a constant sized memset of a constant value (e.g. 0) we can 456 // transform it into a store of the expanded constant value. 457 if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) { 458 assert(MSI->getRawDest() == Ptr && "Consistency error!"); 459 unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue(); 460 if (NumBytes != 0) { 461 unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue(); 462 463 // Compute the value replicated the right number of times. 464 APInt APVal(NumBytes*8, Val); 465 466 // Splat the value if non-zero. 467 if (Val) 468 for (unsigned i = 1; i != NumBytes; ++i) 469 APVal |= APVal << 8; 470 471 Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in"); 472 Value *New = ConvertScalar_InsertValue( 473 ConstantInt::get(User->getContext(), APVal), 474 Old, Offset, Builder); 475 Builder.CreateStore(New, NewAI); 476 477 // If the load we just inserted is now dead, then the memset overwrote 478 // the entire thing. 479 if (Old->use_empty()) 480 Old->eraseFromParent(); 481 } 482 MSI->eraseFromParent(); 483 continue; 484 } 485 486 // If this is a memcpy or memmove into or out of the whole allocation, we 487 // can handle it like a load or store of the scalar type. 488 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) { 489 assert(Offset == 0 && "must be store to start of alloca"); 490 491 // If the source and destination are both to the same alloca, then this is 492 // a noop copy-to-self, just delete it. Otherwise, emit a load and store 493 // as appropriate. 494 AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, 0)); 495 496 if (GetUnderlyingObject(MTI->getSource(), 0) != OrigAI) { 497 // Dest must be OrigAI, change this to be a load from the original 498 // pointer (bitcasted), then a store to our new alloca. 499 assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?"); 500 Value *SrcPtr = MTI->getSource(); 501 const PointerType* SPTy = cast<PointerType>(SrcPtr->getType()); 502 const PointerType* AIPTy = cast<PointerType>(NewAI->getType()); 503 if (SPTy->getAddressSpace() != AIPTy->getAddressSpace()) { 504 AIPTy = PointerType::get(AIPTy->getElementType(), 505 SPTy->getAddressSpace()); 506 } 507 SrcPtr = Builder.CreateBitCast(SrcPtr, AIPTy); 508 509 LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval"); 510 SrcVal->setAlignment(MTI->getAlignment()); 511 Builder.CreateStore(SrcVal, NewAI); 512 } else if (GetUnderlyingObject(MTI->getDest(), 0) != OrigAI) { 513 // Src must be OrigAI, change this to be a load from NewAI then a store 514 // through the original dest pointer (bitcasted). 515 assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?"); 516 LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval"); 517 518 const PointerType* DPTy = cast<PointerType>(MTI->getDest()->getType()); 519 const PointerType* AIPTy = cast<PointerType>(NewAI->getType()); 520 if (DPTy->getAddressSpace() != AIPTy->getAddressSpace()) { 521 AIPTy = PointerType::get(AIPTy->getElementType(), 522 DPTy->getAddressSpace()); 523 } 524 Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), AIPTy); 525 526 StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr); 527 NewStore->setAlignment(MTI->getAlignment()); 528 } else { 529 // Noop transfer. Src == Dst 530 } 531 532 MTI->eraseFromParent(); 533 continue; 534 } 535 536 llvm_unreachable("Unsupported operation!"); 537 } 538} 539 540/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer 541/// or vector value FromVal, extracting the bits from the offset specified by 542/// Offset. This returns the value, which is of type ToType. 543/// 544/// This happens when we are converting an "integer union" to a single 545/// integer scalar, or when we are converting a "vector union" to a vector with 546/// insert/extractelement instructions. 547/// 548/// Offset is an offset from the original alloca, in bits that need to be 549/// shifted to the right. 550Value *ConvertToScalarInfo:: 551ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType, 552 uint64_t Offset, IRBuilder<> &Builder) { 553 // If the load is of the whole new alloca, no conversion is needed. 554 if (FromVal->getType() == ToType && Offset == 0) 555 return FromVal; 556 557 // If the result alloca is a vector type, this is either an element 558 // access or a bitcast to another vector type of the same size. 559 if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) { 560 if (ToType->isVectorTy()) 561 return Builder.CreateBitCast(FromVal, ToType, "tmp"); 562 563 // Otherwise it must be an element access. 564 unsigned Elt = 0; 565 if (Offset) { 566 unsigned EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType()); 567 Elt = Offset/EltSize; 568 assert(EltSize*Elt == Offset && "Invalid modulus in validity checking"); 569 } 570 // Return the element extracted out of it. 571 Value *V = Builder.CreateExtractElement(FromVal, ConstantInt::get( 572 Type::getInt32Ty(FromVal->getContext()), Elt), "tmp"); 573 if (V->getType() != ToType) 574 V = Builder.CreateBitCast(V, ToType, "tmp"); 575 return V; 576 } 577 578 // If ToType is a first class aggregate, extract out each of the pieces and 579 // use insertvalue's to form the FCA. 580 if (const StructType *ST = dyn_cast<StructType>(ToType)) { 581 const StructLayout &Layout = *TD.getStructLayout(ST); 582 Value *Res = UndefValue::get(ST); 583 for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) { 584 Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i), 585 Offset+Layout.getElementOffsetInBits(i), 586 Builder); 587 Res = Builder.CreateInsertValue(Res, Elt, i, "tmp"); 588 } 589 return Res; 590 } 591 592 if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) { 593 uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType()); 594 Value *Res = UndefValue::get(AT); 595 for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { 596 Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(), 597 Offset+i*EltSize, Builder); 598 Res = Builder.CreateInsertValue(Res, Elt, i, "tmp"); 599 } 600 return Res; 601 } 602 603 // Otherwise, this must be a union that was converted to an integer value. 604 const IntegerType *NTy = cast<IntegerType>(FromVal->getType()); 605 606 // If this is a big-endian system and the load is narrower than the 607 // full alloca type, we need to do a shift to get the right bits. 608 int ShAmt = 0; 609 if (TD.isBigEndian()) { 610 // On big-endian machines, the lowest bit is stored at the bit offset 611 // from the pointer given by getTypeStoreSizeInBits. This matters for 612 // integers with a bitwidth that is not a multiple of 8. 613 ShAmt = TD.getTypeStoreSizeInBits(NTy) - 614 TD.getTypeStoreSizeInBits(ToType) - Offset; 615 } else { 616 ShAmt = Offset; 617 } 618 619 // Note: we support negative bitwidths (with shl) which are not defined. 620 // We do this to support (f.e.) loads off the end of a structure where 621 // only some bits are used. 622 if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth()) 623 FromVal = Builder.CreateLShr(FromVal, 624 ConstantInt::get(FromVal->getType(), 625 ShAmt), "tmp"); 626 else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth()) 627 FromVal = Builder.CreateShl(FromVal, 628 ConstantInt::get(FromVal->getType(), 629 -ShAmt), "tmp"); 630 631 // Finally, unconditionally truncate the integer to the right width. 632 unsigned LIBitWidth = TD.getTypeSizeInBits(ToType); 633 if (LIBitWidth < NTy->getBitWidth()) 634 FromVal = 635 Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(), 636 LIBitWidth), "tmp"); 637 else if (LIBitWidth > NTy->getBitWidth()) 638 FromVal = 639 Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(), 640 LIBitWidth), "tmp"); 641 642 // If the result is an integer, this is a trunc or bitcast. 643 if (ToType->isIntegerTy()) { 644 // Should be done. 645 } else if (ToType->isFloatingPointTy() || ToType->isVectorTy()) { 646 // Just do a bitcast, we know the sizes match up. 647 FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp"); 648 } else { 649 // Otherwise must be a pointer. 650 FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp"); 651 } 652 assert(FromVal->getType() == ToType && "Didn't convert right?"); 653 return FromVal; 654} 655 656/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer 657/// or vector value "Old" at the offset specified by Offset. 658/// 659/// This happens when we are converting an "integer union" to a 660/// single integer scalar, or when we are converting a "vector union" to a 661/// vector with insert/extractelement instructions. 662/// 663/// Offset is an offset from the original alloca, in bits that need to be 664/// shifted to the right. 665Value *ConvertToScalarInfo:: 666ConvertScalar_InsertValue(Value *SV, Value *Old, 667 uint64_t Offset, IRBuilder<> &Builder) { 668 // Convert the stored type to the actual type, shift it left to insert 669 // then 'or' into place. 670 const Type *AllocaType = Old->getType(); 671 LLVMContext &Context = Old->getContext(); 672 673 if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) { 674 uint64_t VecSize = TD.getTypeAllocSizeInBits(VTy); 675 uint64_t ValSize = TD.getTypeAllocSizeInBits(SV->getType()); 676 677 // Changing the whole vector with memset or with an access of a different 678 // vector type? 679 if (ValSize == VecSize) 680 return Builder.CreateBitCast(SV, AllocaType, "tmp"); 681 682 uint64_t EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType()); 683 684 // Must be an element insertion. 685 unsigned Elt = Offset/EltSize; 686 687 if (SV->getType() != VTy->getElementType()) 688 SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp"); 689 690 SV = Builder.CreateInsertElement(Old, SV, 691 ConstantInt::get(Type::getInt32Ty(SV->getContext()), Elt), 692 "tmp"); 693 return SV; 694 } 695 696 // If SV is a first-class aggregate value, insert each value recursively. 697 if (const StructType *ST = dyn_cast<StructType>(SV->getType())) { 698 const StructLayout &Layout = *TD.getStructLayout(ST); 699 for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) { 700 Value *Elt = Builder.CreateExtractValue(SV, i, "tmp"); 701 Old = ConvertScalar_InsertValue(Elt, Old, 702 Offset+Layout.getElementOffsetInBits(i), 703 Builder); 704 } 705 return Old; 706 } 707 708 if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) { 709 uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType()); 710 for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { 711 Value *Elt = Builder.CreateExtractValue(SV, i, "tmp"); 712 Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder); 713 } 714 return Old; 715 } 716 717 // If SV is a float, convert it to the appropriate integer type. 718 // If it is a pointer, do the same. 719 unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType()); 720 unsigned DestWidth = TD.getTypeSizeInBits(AllocaType); 721 unsigned SrcStoreWidth = TD.getTypeStoreSizeInBits(SV->getType()); 722 unsigned DestStoreWidth = TD.getTypeStoreSizeInBits(AllocaType); 723 if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy()) 724 SV = Builder.CreateBitCast(SV, 725 IntegerType::get(SV->getContext(),SrcWidth), "tmp"); 726 else if (SV->getType()->isPointerTy()) 727 SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getContext()), "tmp"); 728 729 // Zero extend or truncate the value if needed. 730 if (SV->getType() != AllocaType) { 731 if (SV->getType()->getPrimitiveSizeInBits() < 732 AllocaType->getPrimitiveSizeInBits()) 733 SV = Builder.CreateZExt(SV, AllocaType, "tmp"); 734 else { 735 // Truncation may be needed if storing more than the alloca can hold 736 // (undefined behavior). 737 SV = Builder.CreateTrunc(SV, AllocaType, "tmp"); 738 SrcWidth = DestWidth; 739 SrcStoreWidth = DestStoreWidth; 740 } 741 } 742 743 // If this is a big-endian system and the store is narrower than the 744 // full alloca type, we need to do a shift to get the right bits. 745 int ShAmt = 0; 746 if (TD.isBigEndian()) { 747 // On big-endian machines, the lowest bit is stored at the bit offset 748 // from the pointer given by getTypeStoreSizeInBits. This matters for 749 // integers with a bitwidth that is not a multiple of 8. 750 ShAmt = DestStoreWidth - SrcStoreWidth - Offset; 751 } else { 752 ShAmt = Offset; 753 } 754 755 // Note: we support negative bitwidths (with shr) which are not defined. 756 // We do this to support (f.e.) stores off the end of a structure where 757 // only some bits in the structure are set. 758 APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth)); 759 if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) { 760 SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(), 761 ShAmt), "tmp"); 762 Mask <<= ShAmt; 763 } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) { 764 SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(), 765 -ShAmt), "tmp"); 766 Mask = Mask.lshr(-ShAmt); 767 } 768 769 // Mask out the bits we are about to insert from the old value, and or 770 // in the new bits. 771 if (SrcWidth != DestWidth) { 772 assert(DestWidth > SrcWidth); 773 Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask"); 774 SV = Builder.CreateOr(Old, SV, "ins"); 775 } 776 return SV; 777} 778 779 780//===----------------------------------------------------------------------===// 781// SRoA Driver 782//===----------------------------------------------------------------------===// 783 784 785bool SROA::runOnFunction(Function &F) { 786 TD = getAnalysisIfAvailable<TargetData>(); 787 788 bool Changed = performPromotion(F); 789 790 // FIXME: ScalarRepl currently depends on TargetData more than it 791 // theoretically needs to. It should be refactored in order to support 792 // target-independent IR. Until this is done, just skip the actual 793 // scalar-replacement portion of this pass. 794 if (!TD) return Changed; 795 796 while (1) { 797 bool LocalChange = performScalarRepl(F); 798 if (!LocalChange) break; // No need to repromote if no scalarrepl 799 Changed = true; 800 LocalChange = performPromotion(F); 801 if (!LocalChange) break; // No need to re-scalarrepl if no promotion 802 } 803 804 return Changed; 805} 806 807 808bool SROA::performPromotion(Function &F) { 809 std::vector<AllocaInst*> Allocas; 810 DominatorTree &DT = getAnalysis<DominatorTree>(); 811 DominanceFrontier &DF = getAnalysis<DominanceFrontier>(); 812 813 BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function 814 815 bool Changed = false; 816 817 while (1) { 818 Allocas.clear(); 819 820 // Find allocas that are safe to promote, by looking at all instructions in 821 // the entry node 822 for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I) 823 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca? 824 if (isAllocaPromotable(AI)) 825 Allocas.push_back(AI); 826 827 if (Allocas.empty()) break; 828 829 PromoteMemToReg(Allocas, DT, DF); 830 NumPromoted += Allocas.size(); 831 Changed = true; 832 } 833 834 return Changed; 835} 836 837 838/// ShouldAttemptScalarRepl - Decide if an alloca is a good candidate for 839/// SROA. It must be a struct or array type with a small number of elements. 840static bool ShouldAttemptScalarRepl(AllocaInst *AI) { 841 const Type *T = AI->getAllocatedType(); 842 // Do not promote any struct into more than 32 separate vars. 843 if (const StructType *ST = dyn_cast<StructType>(T)) 844 return ST->getNumElements() <= 32; 845 // Arrays are much less likely to be safe for SROA; only consider 846 // them if they are very small. 847 if (const ArrayType *AT = dyn_cast<ArrayType>(T)) 848 return AT->getNumElements() <= 8; 849 return false; 850} 851 852 853// performScalarRepl - This algorithm is a simple worklist driven algorithm, 854// which runs on all of the malloc/alloca instructions in the function, removing 855// them if they are only used by getelementptr instructions. 856// 857bool SROA::performScalarRepl(Function &F) { 858 std::vector<AllocaInst*> WorkList; 859 860 // Scan the entry basic block, adding allocas to the worklist. 861 BasicBlock &BB = F.getEntryBlock(); 862 for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) 863 if (AllocaInst *A = dyn_cast<AllocaInst>(I)) 864 WorkList.push_back(A); 865 866 // Process the worklist 867 bool Changed = false; 868 while (!WorkList.empty()) { 869 AllocaInst *AI = WorkList.back(); 870 WorkList.pop_back(); 871 872 // Handle dead allocas trivially. These can be formed by SROA'ing arrays 873 // with unused elements. 874 if (AI->use_empty()) { 875 AI->eraseFromParent(); 876 Changed = true; 877 continue; 878 } 879 880 // If this alloca is impossible for us to promote, reject it early. 881 if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized()) 882 continue; 883 884 // Check to see if this allocation is only modified by a memcpy/memmove from 885 // a constant global. If this is the case, we can change all users to use 886 // the constant global instead. This is commonly produced by the CFE by 887 // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A' 888 // is only subsequently read. 889 if (MemTransferInst *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) { 890 DEBUG(dbgs() << "Found alloca equal to global: " << *AI << '\n'); 891 DEBUG(dbgs() << " memcpy = " << *TheCopy << '\n'); 892 Constant *TheSrc = cast<Constant>(TheCopy->getSource()); 893 AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType())); 894 TheCopy->eraseFromParent(); // Don't mutate the global. 895 AI->eraseFromParent(); 896 ++NumGlobals; 897 Changed = true; 898 continue; 899 } 900 901 // Check to see if we can perform the core SROA transformation. We cannot 902 // transform the allocation instruction if it is an array allocation 903 // (allocations OF arrays are ok though), and an allocation of a scalar 904 // value cannot be decomposed at all. 905 uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType()); 906 907 // Do not promote [0 x %struct]. 908 if (AllocaSize == 0) continue; 909 910 // Do not promote any struct whose size is too big. 911 if (AllocaSize > SRThreshold) continue; 912 913 // If the alloca looks like a good candidate for scalar replacement, and if 914 // all its users can be transformed, then split up the aggregate into its 915 // separate elements. 916 if (ShouldAttemptScalarRepl(AI) && isSafeAllocaToScalarRepl(AI)) { 917 DoScalarReplacement(AI, WorkList); 918 Changed = true; 919 continue; 920 } 921 922 // If we can turn this aggregate value (potentially with casts) into a 923 // simple scalar value that can be mem2reg'd into a register value. 924 // IsNotTrivial tracks whether this is something that mem2reg could have 925 // promoted itself. If so, we don't want to transform it needlessly. Note 926 // that we can't just check based on the type: the alloca may be of an i32 927 // but that has pointer arithmetic to set byte 3 of it or something. 928 if (AllocaInst *NewAI = 929 ConvertToScalarInfo((unsigned)AllocaSize, *TD).TryConvert(AI)) { 930 NewAI->takeName(AI); 931 AI->eraseFromParent(); 932 ++NumConverted; 933 Changed = true; 934 continue; 935 } 936 937 // Otherwise, couldn't process this alloca. 938 } 939 940 return Changed; 941} 942 943/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl 944/// predicate, do SROA now. 945void SROA::DoScalarReplacement(AllocaInst *AI, 946 std::vector<AllocaInst*> &WorkList) { 947 DEBUG(dbgs() << "Found inst to SROA: " << *AI << '\n'); 948 SmallVector<AllocaInst*, 32> ElementAllocas; 949 if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) { 950 ElementAllocas.reserve(ST->getNumContainedTypes()); 951 for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) { 952 AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0, 953 AI->getAlignment(), 954 AI->getName() + "." + Twine(i), AI); 955 ElementAllocas.push_back(NA); 956 WorkList.push_back(NA); // Add to worklist for recursive processing 957 } 958 } else { 959 const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType()); 960 ElementAllocas.reserve(AT->getNumElements()); 961 const Type *ElTy = AT->getElementType(); 962 for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { 963 AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(), 964 AI->getName() + "." + Twine(i), AI); 965 ElementAllocas.push_back(NA); 966 WorkList.push_back(NA); // Add to worklist for recursive processing 967 } 968 } 969 970 // Now that we have created the new alloca instructions, rewrite all the 971 // uses of the old alloca. 972 RewriteForScalarRepl(AI, AI, 0, ElementAllocas); 973 974 // Now erase any instructions that were made dead while rewriting the alloca. 975 DeleteDeadInstructions(); 976 AI->eraseFromParent(); 977 978 ++NumReplaced; 979} 980 981/// DeleteDeadInstructions - Erase instructions on the DeadInstrs list, 982/// recursively including all their operands that become trivially dead. 983void SROA::DeleteDeadInstructions() { 984 while (!DeadInsts.empty()) { 985 Instruction *I = cast<Instruction>(DeadInsts.pop_back_val()); 986 987 for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) 988 if (Instruction *U = dyn_cast<Instruction>(*OI)) { 989 // Zero out the operand and see if it becomes trivially dead. 990 // (But, don't add allocas to the dead instruction list -- they are 991 // already on the worklist and will be deleted separately.) 992 *OI = 0; 993 if (isInstructionTriviallyDead(U) && !isa<AllocaInst>(U)) 994 DeadInsts.push_back(U); 995 } 996 997 I->eraseFromParent(); 998 } 999} 1000 1001/// isSafeForScalarRepl - Check if instruction I is a safe use with regard to 1002/// performing scalar replacement of alloca AI. The results are flagged in 1003/// the Info parameter. Offset indicates the position within AI that is 1004/// referenced by this instruction. 1005void SROA::isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset, 1006 AllocaInfo &Info) { 1007 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) { 1008 Instruction *User = cast<Instruction>(*UI); 1009 1010 if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) { 1011 isSafeForScalarRepl(BC, AI, Offset, Info); 1012 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) { 1013 uint64_t GEPOffset = Offset; 1014 isSafeGEP(GEPI, AI, GEPOffset, Info); 1015 if (!Info.isUnsafe) 1016 isSafeForScalarRepl(GEPI, AI, GEPOffset, Info); 1017 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { 1018 ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); 1019 if (Length) 1020 isSafeMemAccess(AI, Offset, Length->getZExtValue(), 0, 1021 UI.getOperandNo() == 0, Info); 1022 else 1023 MarkUnsafe(Info); 1024 } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 1025 if (!LI->isVolatile()) { 1026 const Type *LIType = LI->getType(); 1027 isSafeMemAccess(AI, Offset, TD->getTypeAllocSize(LIType), 1028 LIType, false, Info); 1029 } else 1030 MarkUnsafe(Info); 1031 } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 1032 // Store is ok if storing INTO the pointer, not storing the pointer 1033 if (!SI->isVolatile() && SI->getOperand(0) != I) { 1034 const Type *SIType = SI->getOperand(0)->getType(); 1035 isSafeMemAccess(AI, Offset, TD->getTypeAllocSize(SIType), 1036 SIType, true, Info); 1037 } else 1038 MarkUnsafe(Info); 1039 } else { 1040 DEBUG(errs() << " Transformation preventing inst: " << *User << '\n'); 1041 MarkUnsafe(Info); 1042 } 1043 if (Info.isUnsafe) return; 1044 } 1045} 1046 1047/// isSafeGEP - Check if a GEP instruction can be handled for scalar 1048/// replacement. It is safe when all the indices are constant, in-bounds 1049/// references, and when the resulting offset corresponds to an element within 1050/// the alloca type. The results are flagged in the Info parameter. Upon 1051/// return, Offset is adjusted as specified by the GEP indices. 1052void SROA::isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI, 1053 uint64_t &Offset, AllocaInfo &Info) { 1054 gep_type_iterator GEPIt = gep_type_begin(GEPI), E = gep_type_end(GEPI); 1055 if (GEPIt == E) 1056 return; 1057 1058 // Walk through the GEP type indices, checking the types that this indexes 1059 // into. 1060 for (; GEPIt != E; ++GEPIt) { 1061 // Ignore struct elements, no extra checking needed for these. 1062 if ((*GEPIt)->isStructTy()) 1063 continue; 1064 1065 ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand()); 1066 if (!IdxVal) 1067 return MarkUnsafe(Info); 1068 } 1069 1070 // Compute the offset due to this GEP and check if the alloca has a 1071 // component element at that offset. 1072 SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end()); 1073 Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), 1074 &Indices[0], Indices.size()); 1075 if (!TypeHasComponent(AI->getAllocatedType(), Offset, 0)) 1076 MarkUnsafe(Info); 1077} 1078 1079/// isHomogeneousAggregate - Check if type T is a struct or array containing 1080/// elements of the same type (which is always true for arrays). If so, 1081/// return true with NumElts and EltTy set to the number of elements and the 1082/// element type, respectively. 1083static bool isHomogeneousAggregate(const Type *T, unsigned &NumElts, 1084 const Type *&EltTy) { 1085 if (const ArrayType *AT = dyn_cast<ArrayType>(T)) { 1086 NumElts = AT->getNumElements(); 1087 EltTy = AT->getElementType(); 1088 return true; 1089 } 1090 if (const StructType *ST = dyn_cast<StructType>(T)) { 1091 NumElts = ST->getNumContainedTypes(); 1092 EltTy = ST->getContainedType(0); 1093 for (unsigned n = 1; n < NumElts; ++n) { 1094 if (ST->getContainedType(n) != EltTy) 1095 return false; 1096 } 1097 return true; 1098 } 1099 return false; 1100} 1101 1102/// isCompatibleAggregate - Check if T1 and T2 are either the same type or are 1103/// "homogeneous" aggregates with the same element type and number of elements. 1104static bool isCompatibleAggregate(const Type *T1, const Type *T2) { 1105 if (T1 == T2) 1106 return true; 1107 1108 unsigned NumElts1, NumElts2; 1109 const Type *EltTy1, *EltTy2; 1110 if (isHomogeneousAggregate(T1, NumElts1, EltTy1) && 1111 isHomogeneousAggregate(T2, NumElts2, EltTy2) && 1112 NumElts1 == NumElts2 && 1113 EltTy1 == EltTy2) 1114 return true; 1115 1116 return false; 1117} 1118 1119/// isSafeMemAccess - Check if a load/store/memcpy operates on the entire AI 1120/// alloca or has an offset and size that corresponds to a component element 1121/// within it. The offset checked here may have been formed from a GEP with a 1122/// pointer bitcasted to a different type. 1123void SROA::isSafeMemAccess(AllocaInst *AI, uint64_t Offset, uint64_t MemSize, 1124 const Type *MemOpType, bool isStore, 1125 AllocaInfo &Info) { 1126 // Check if this is a load/store of the entire alloca. 1127 if (Offset == 0 && MemSize == TD->getTypeAllocSize(AI->getAllocatedType())) { 1128 // This can be safe for MemIntrinsics (where MemOpType is 0) and integer 1129 // loads/stores (which are essentially the same as the MemIntrinsics with 1130 // regard to copying padding between elements). But, if an alloca is 1131 // flagged as both a source and destination of such operations, we'll need 1132 // to check later for padding between elements. 1133 if (!MemOpType || MemOpType->isIntegerTy()) { 1134 if (isStore) 1135 Info.isMemCpyDst = true; 1136 else 1137 Info.isMemCpySrc = true; 1138 return; 1139 } 1140 // This is also safe for references using a type that is compatible with 1141 // the type of the alloca, so that loads/stores can be rewritten using 1142 // insertvalue/extractvalue. 1143 if (isCompatibleAggregate(MemOpType, AI->getAllocatedType())) 1144 return; 1145 } 1146 // Check if the offset/size correspond to a component within the alloca type. 1147 const Type *T = AI->getAllocatedType(); 1148 if (TypeHasComponent(T, Offset, MemSize)) 1149 return; 1150 1151 return MarkUnsafe(Info); 1152} 1153 1154/// TypeHasComponent - Return true if T has a component type with the 1155/// specified offset and size. If Size is zero, do not check the size. 1156bool SROA::TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size) { 1157 const Type *EltTy; 1158 uint64_t EltSize; 1159 if (const StructType *ST = dyn_cast<StructType>(T)) { 1160 const StructLayout *Layout = TD->getStructLayout(ST); 1161 unsigned EltIdx = Layout->getElementContainingOffset(Offset); 1162 EltTy = ST->getContainedType(EltIdx); 1163 EltSize = TD->getTypeAllocSize(EltTy); 1164 Offset -= Layout->getElementOffset(EltIdx); 1165 } else if (const ArrayType *AT = dyn_cast<ArrayType>(T)) { 1166 EltTy = AT->getElementType(); 1167 EltSize = TD->getTypeAllocSize(EltTy); 1168 if (Offset >= AT->getNumElements() * EltSize) 1169 return false; 1170 Offset %= EltSize; 1171 } else { 1172 return false; 1173 } 1174 if (Offset == 0 && (Size == 0 || EltSize == Size)) 1175 return true; 1176 // Check if the component spans multiple elements. 1177 if (Offset + Size > EltSize) 1178 return false; 1179 return TypeHasComponent(EltTy, Offset, Size); 1180} 1181 1182/// RewriteForScalarRepl - Alloca AI is being split into NewElts, so rewrite 1183/// the instruction I, which references it, to use the separate elements. 1184/// Offset indicates the position within AI that is referenced by this 1185/// instruction. 1186void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset, 1187 SmallVector<AllocaInst*, 32> &NewElts) { 1188 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) { 1189 Instruction *User = cast<Instruction>(*UI); 1190 1191 if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) { 1192 RewriteBitCast(BC, AI, Offset, NewElts); 1193 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) { 1194 RewriteGEP(GEPI, AI, Offset, NewElts); 1195 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { 1196 ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); 1197 uint64_t MemSize = Length->getZExtValue(); 1198 if (Offset == 0 && 1199 MemSize == TD->getTypeAllocSize(AI->getAllocatedType())) 1200 RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts); 1201 // Otherwise the intrinsic can only touch a single element and the 1202 // address operand will be updated, so nothing else needs to be done. 1203 } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 1204 const Type *LIType = LI->getType(); 1205 if (isCompatibleAggregate(LIType, AI->getAllocatedType())) { 1206 // Replace: 1207 // %res = load { i32, i32 }* %alloc 1208 // with: 1209 // %load.0 = load i32* %alloc.0 1210 // %insert.0 insertvalue { i32, i32 } zeroinitializer, i32 %load.0, 0 1211 // %load.1 = load i32* %alloc.1 1212 // %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1 1213 // (Also works for arrays instead of structs) 1214 Value *Insert = UndefValue::get(LIType); 1215 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { 1216 Value *Load = new LoadInst(NewElts[i], "load", LI); 1217 Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI); 1218 } 1219 LI->replaceAllUsesWith(Insert); 1220 DeadInsts.push_back(LI); 1221 } else if (LIType->isIntegerTy() && 1222 TD->getTypeAllocSize(LIType) == 1223 TD->getTypeAllocSize(AI->getAllocatedType())) { 1224 // If this is a load of the entire alloca to an integer, rewrite it. 1225 RewriteLoadUserOfWholeAlloca(LI, AI, NewElts); 1226 } 1227 } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 1228 Value *Val = SI->getOperand(0); 1229 const Type *SIType = Val->getType(); 1230 if (isCompatibleAggregate(SIType, AI->getAllocatedType())) { 1231 // Replace: 1232 // store { i32, i32 } %val, { i32, i32 }* %alloc 1233 // with: 1234 // %val.0 = extractvalue { i32, i32 } %val, 0 1235 // store i32 %val.0, i32* %alloc.0 1236 // %val.1 = extractvalue { i32, i32 } %val, 1 1237 // store i32 %val.1, i32* %alloc.1 1238 // (Also works for arrays instead of structs) 1239 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { 1240 Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI); 1241 new StoreInst(Extract, NewElts[i], SI); 1242 } 1243 DeadInsts.push_back(SI); 1244 } else if (SIType->isIntegerTy() && 1245 TD->getTypeAllocSize(SIType) == 1246 TD->getTypeAllocSize(AI->getAllocatedType())) { 1247 // If this is a store of the entire alloca from an integer, rewrite it. 1248 RewriteStoreUserOfWholeAlloca(SI, AI, NewElts); 1249 } 1250 } 1251 } 1252} 1253 1254/// RewriteBitCast - Update a bitcast reference to the alloca being replaced 1255/// and recursively continue updating all of its uses. 1256void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset, 1257 SmallVector<AllocaInst*, 32> &NewElts) { 1258 RewriteForScalarRepl(BC, AI, Offset, NewElts); 1259 if (BC->getOperand(0) != AI) 1260 return; 1261 1262 // The bitcast references the original alloca. Replace its uses with 1263 // references to the first new element alloca. 1264 Instruction *Val = NewElts[0]; 1265 if (Val->getType() != BC->getDestTy()) { 1266 Val = new BitCastInst(Val, BC->getDestTy(), "", BC); 1267 Val->takeName(BC); 1268 } 1269 BC->replaceAllUsesWith(Val); 1270 DeadInsts.push_back(BC); 1271} 1272 1273/// FindElementAndOffset - Return the index of the element containing Offset 1274/// within the specified type, which must be either a struct or an array. 1275/// Sets T to the type of the element and Offset to the offset within that 1276/// element. IdxTy is set to the type of the index result to be used in a 1277/// GEP instruction. 1278uint64_t SROA::FindElementAndOffset(const Type *&T, uint64_t &Offset, 1279 const Type *&IdxTy) { 1280 uint64_t Idx = 0; 1281 if (const StructType *ST = dyn_cast<StructType>(T)) { 1282 const StructLayout *Layout = TD->getStructLayout(ST); 1283 Idx = Layout->getElementContainingOffset(Offset); 1284 T = ST->getContainedType(Idx); 1285 Offset -= Layout->getElementOffset(Idx); 1286 IdxTy = Type::getInt32Ty(T->getContext()); 1287 return Idx; 1288 } 1289 const ArrayType *AT = cast<ArrayType>(T); 1290 T = AT->getElementType(); 1291 uint64_t EltSize = TD->getTypeAllocSize(T); 1292 Idx = Offset / EltSize; 1293 Offset -= Idx * EltSize; 1294 IdxTy = Type::getInt64Ty(T->getContext()); 1295 return Idx; 1296} 1297 1298/// RewriteGEP - Check if this GEP instruction moves the pointer across 1299/// elements of the alloca that are being split apart, and if so, rewrite 1300/// the GEP to be relative to the new element. 1301void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset, 1302 SmallVector<AllocaInst*, 32> &NewElts) { 1303 uint64_t OldOffset = Offset; 1304 SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end()); 1305 Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), 1306 &Indices[0], Indices.size()); 1307 1308 RewriteForScalarRepl(GEPI, AI, Offset, NewElts); 1309 1310 const Type *T = AI->getAllocatedType(); 1311 const Type *IdxTy; 1312 uint64_t OldIdx = FindElementAndOffset(T, OldOffset, IdxTy); 1313 if (GEPI->getOperand(0) == AI) 1314 OldIdx = ~0ULL; // Force the GEP to be rewritten. 1315 1316 T = AI->getAllocatedType(); 1317 uint64_t EltOffset = Offset; 1318 uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy); 1319 1320 // If this GEP does not move the pointer across elements of the alloca 1321 // being split, then it does not needs to be rewritten. 1322 if (Idx == OldIdx) 1323 return; 1324 1325 const Type *i32Ty = Type::getInt32Ty(AI->getContext()); 1326 SmallVector<Value*, 8> NewArgs; 1327 NewArgs.push_back(Constant::getNullValue(i32Ty)); 1328 while (EltOffset != 0) { 1329 uint64_t EltIdx = FindElementAndOffset(T, EltOffset, IdxTy); 1330 NewArgs.push_back(ConstantInt::get(IdxTy, EltIdx)); 1331 } 1332 Instruction *Val = NewElts[Idx]; 1333 if (NewArgs.size() > 1) { 1334 Val = GetElementPtrInst::CreateInBounds(Val, NewArgs.begin(), 1335 NewArgs.end(), "", GEPI); 1336 Val->takeName(GEPI); 1337 } 1338 if (Val->getType() != GEPI->getType()) 1339 Val = new BitCastInst(Val, GEPI->getType(), Val->getName(), GEPI); 1340 GEPI->replaceAllUsesWith(Val); 1341 DeadInsts.push_back(GEPI); 1342} 1343 1344/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI. 1345/// Rewrite it to copy or set the elements of the scalarized memory. 1346void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst, 1347 AllocaInst *AI, 1348 SmallVector<AllocaInst*, 32> &NewElts) { 1349 // If this is a memcpy/memmove, construct the other pointer as the 1350 // appropriate type. The "Other" pointer is the pointer that goes to memory 1351 // that doesn't have anything to do with the alloca that we are promoting. For 1352 // memset, this Value* stays null. 1353 Value *OtherPtr = 0; 1354 unsigned MemAlignment = MI->getAlignment(); 1355 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcopy 1356 if (Inst == MTI->getRawDest()) 1357 OtherPtr = MTI->getRawSource(); 1358 else { 1359 assert(Inst == MTI->getRawSource()); 1360 OtherPtr = MTI->getRawDest(); 1361 } 1362 } 1363 1364 // If there is an other pointer, we want to convert it to the same pointer 1365 // type as AI has, so we can GEP through it safely. 1366 if (OtherPtr) { 1367 unsigned AddrSpace = 1368 cast<PointerType>(OtherPtr->getType())->getAddressSpace(); 1369 1370 // Remove bitcasts and all-zero GEPs from OtherPtr. This is an 1371 // optimization, but it's also required to detect the corner case where 1372 // both pointer operands are referencing the same memory, and where 1373 // OtherPtr may be a bitcast or GEP that currently being rewritten. (This 1374 // function is only called for mem intrinsics that access the whole 1375 // aggregate, so non-zero GEPs are not an issue here.) 1376 OtherPtr = OtherPtr->stripPointerCasts(); 1377 1378 // Copying the alloca to itself is a no-op: just delete it. 1379 if (OtherPtr == AI || OtherPtr == NewElts[0]) { 1380 // This code will run twice for a no-op memcpy -- once for each operand. 1381 // Put only one reference to MI on the DeadInsts list. 1382 for (SmallVector<Value*, 32>::const_iterator I = DeadInsts.begin(), 1383 E = DeadInsts.end(); I != E; ++I) 1384 if (*I == MI) return; 1385 DeadInsts.push_back(MI); 1386 return; 1387 } 1388 1389 // If the pointer is not the right type, insert a bitcast to the right 1390 // type. 1391 const Type *NewTy = 1392 PointerType::get(AI->getType()->getElementType(), AddrSpace); 1393 1394 if (OtherPtr->getType() != NewTy) 1395 OtherPtr = new BitCastInst(OtherPtr, NewTy, OtherPtr->getName(), MI); 1396 } 1397 1398 // Process each element of the aggregate. 1399 bool SROADest = MI->getRawDest() == Inst; 1400 1401 Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext())); 1402 1403 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { 1404 // If this is a memcpy/memmove, emit a GEP of the other element address. 1405 Value *OtherElt = 0; 1406 unsigned OtherEltAlign = MemAlignment; 1407 1408 if (OtherPtr) { 1409 Value *Idx[2] = { Zero, 1410 ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) }; 1411 OtherElt = GetElementPtrInst::CreateInBounds(OtherPtr, Idx, Idx + 2, 1412 OtherPtr->getName()+"."+Twine(i), 1413 MI); 1414 uint64_t EltOffset; 1415 const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType()); 1416 const Type *OtherTy = OtherPtrTy->getElementType(); 1417 if (const StructType *ST = dyn_cast<StructType>(OtherTy)) { 1418 EltOffset = TD->getStructLayout(ST)->getElementOffset(i); 1419 } else { 1420 const Type *EltTy = cast<SequentialType>(OtherTy)->getElementType(); 1421 EltOffset = TD->getTypeAllocSize(EltTy)*i; 1422 } 1423 1424 // The alignment of the other pointer is the guaranteed alignment of the 1425 // element, which is affected by both the known alignment of the whole 1426 // mem intrinsic and the alignment of the element. If the alignment of 1427 // the memcpy (f.e.) is 32 but the element is at a 4-byte offset, then the 1428 // known alignment is just 4 bytes. 1429 OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset); 1430 } 1431 1432 Value *EltPtr = NewElts[i]; 1433 const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType(); 1434 1435 // If we got down to a scalar, insert a load or store as appropriate. 1436 if (EltTy->isSingleValueType()) { 1437 if (isa<MemTransferInst>(MI)) { 1438 if (SROADest) { 1439 // From Other to Alloca. 1440 Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI); 1441 new StoreInst(Elt, EltPtr, MI); 1442 } else { 1443 // From Alloca to Other. 1444 Value *Elt = new LoadInst(EltPtr, "tmp", MI); 1445 new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI); 1446 } 1447 continue; 1448 } 1449 assert(isa<MemSetInst>(MI)); 1450 1451 // If the stored element is zero (common case), just store a null 1452 // constant. 1453 Constant *StoreVal; 1454 if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getArgOperand(1))) { 1455 if (CI->isZero()) { 1456 StoreVal = Constant::getNullValue(EltTy); // 0.0, null, 0, <0,0> 1457 } else { 1458 // If EltTy is a vector type, get the element type. 1459 const Type *ValTy = EltTy->getScalarType(); 1460 1461 // Construct an integer with the right value. 1462 unsigned EltSize = TD->getTypeSizeInBits(ValTy); 1463 APInt OneVal(EltSize, CI->getZExtValue()); 1464 APInt TotalVal(OneVal); 1465 // Set each byte. 1466 for (unsigned i = 0; 8*i < EltSize; ++i) { 1467 TotalVal = TotalVal.shl(8); 1468 TotalVal |= OneVal; 1469 } 1470 1471 // Convert the integer value to the appropriate type. 1472 StoreVal = ConstantInt::get(CI->getContext(), TotalVal); 1473 if (ValTy->isPointerTy()) 1474 StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy); 1475 else if (ValTy->isFloatingPointTy()) 1476 StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy); 1477 assert(StoreVal->getType() == ValTy && "Type mismatch!"); 1478 1479 // If the requested value was a vector constant, create it. 1480 if (EltTy != ValTy) { 1481 unsigned NumElts = cast<VectorType>(ValTy)->getNumElements(); 1482 SmallVector<Constant*, 16> Elts(NumElts, StoreVal); 1483 StoreVal = ConstantVector::get(&Elts[0], NumElts); 1484 } 1485 } 1486 new StoreInst(StoreVal, EltPtr, MI); 1487 continue; 1488 } 1489 // Otherwise, if we're storing a byte variable, use a memset call for 1490 // this element. 1491 } 1492 1493 unsigned EltSize = TD->getTypeAllocSize(EltTy); 1494 1495 IRBuilder<> Builder(MI); 1496 1497 // Finally, insert the meminst for this element. 1498 if (isa<MemSetInst>(MI)) { 1499 Builder.CreateMemSet(EltPtr, MI->getArgOperand(1), EltSize, 1500 MI->isVolatile()); 1501 } else { 1502 assert(isa<MemTransferInst>(MI)); 1503 Value *Dst = SROADest ? EltPtr : OtherElt; // Dest ptr 1504 Value *Src = SROADest ? OtherElt : EltPtr; // Src ptr 1505 1506 if (isa<MemCpyInst>(MI)) 1507 Builder.CreateMemCpy(Dst, Src, EltSize, OtherEltAlign,MI->isVolatile()); 1508 else 1509 Builder.CreateMemMove(Dst, Src, EltSize,OtherEltAlign,MI->isVolatile()); 1510 } 1511 } 1512 DeadInsts.push_back(MI); 1513} 1514 1515/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that 1516/// overwrites the entire allocation. Extract out the pieces of the stored 1517/// integer and store them individually. 1518void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI, 1519 SmallVector<AllocaInst*, 32> &NewElts){ 1520 // Extract each element out of the integer according to its structure offset 1521 // and store the element value to the individual alloca. 1522 Value *SrcVal = SI->getOperand(0); 1523 const Type *AllocaEltTy = AI->getAllocatedType(); 1524 uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy); 1525 1526 // Handle tail padding by extending the operand 1527 if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits) 1528 SrcVal = new ZExtInst(SrcVal, 1529 IntegerType::get(SI->getContext(), AllocaSizeBits), 1530 "", SI); 1531 1532 DEBUG(dbgs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << '\n' << *SI 1533 << '\n'); 1534 1535 // There are two forms here: AI could be an array or struct. Both cases 1536 // have different ways to compute the element offset. 1537 if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) { 1538 const StructLayout *Layout = TD->getStructLayout(EltSTy); 1539 1540 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { 1541 // Get the number of bits to shift SrcVal to get the value. 1542 const Type *FieldTy = EltSTy->getElementType(i); 1543 uint64_t Shift = Layout->getElementOffsetInBits(i); 1544 1545 if (TD->isBigEndian()) 1546 Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy); 1547 1548 Value *EltVal = SrcVal; 1549 if (Shift) { 1550 Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift); 1551 EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal, 1552 "sroa.store.elt", SI); 1553 } 1554 1555 // Truncate down to an integer of the right size. 1556 uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy); 1557 1558 // Ignore zero sized fields like {}, they obviously contain no data. 1559 if (FieldSizeBits == 0) continue; 1560 1561 if (FieldSizeBits != AllocaSizeBits) 1562 EltVal = new TruncInst(EltVal, 1563 IntegerType::get(SI->getContext(), FieldSizeBits), 1564 "", SI); 1565 Value *DestField = NewElts[i]; 1566 if (EltVal->getType() == FieldTy) { 1567 // Storing to an integer field of this size, just do it. 1568 } else if (FieldTy->isFloatingPointTy() || FieldTy->isVectorTy()) { 1569 // Bitcast to the right element type (for fp/vector values). 1570 EltVal = new BitCastInst(EltVal, FieldTy, "", SI); 1571 } else { 1572 // Otherwise, bitcast the dest pointer (for aggregates). 1573 DestField = new BitCastInst(DestField, 1574 PointerType::getUnqual(EltVal->getType()), 1575 "", SI); 1576 } 1577 new StoreInst(EltVal, DestField, SI); 1578 } 1579 1580 } else { 1581 const ArrayType *ATy = cast<ArrayType>(AllocaEltTy); 1582 const Type *ArrayEltTy = ATy->getElementType(); 1583 uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy); 1584 uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy); 1585 1586 uint64_t Shift; 1587 1588 if (TD->isBigEndian()) 1589 Shift = AllocaSizeBits-ElementOffset; 1590 else 1591 Shift = 0; 1592 1593 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { 1594 // Ignore zero sized fields like {}, they obviously contain no data. 1595 if (ElementSizeBits == 0) continue; 1596 1597 Value *EltVal = SrcVal; 1598 if (Shift) { 1599 Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift); 1600 EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal, 1601 "sroa.store.elt", SI); 1602 } 1603 1604 // Truncate down to an integer of the right size. 1605 if (ElementSizeBits != AllocaSizeBits) 1606 EltVal = new TruncInst(EltVal, 1607 IntegerType::get(SI->getContext(), 1608 ElementSizeBits),"",SI); 1609 Value *DestField = NewElts[i]; 1610 if (EltVal->getType() == ArrayEltTy) { 1611 // Storing to an integer field of this size, just do it. 1612 } else if (ArrayEltTy->isFloatingPointTy() || 1613 ArrayEltTy->isVectorTy()) { 1614 // Bitcast to the right element type (for fp/vector values). 1615 EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI); 1616 } else { 1617 // Otherwise, bitcast the dest pointer (for aggregates). 1618 DestField = new BitCastInst(DestField, 1619 PointerType::getUnqual(EltVal->getType()), 1620 "", SI); 1621 } 1622 new StoreInst(EltVal, DestField, SI); 1623 1624 if (TD->isBigEndian()) 1625 Shift -= ElementOffset; 1626 else 1627 Shift += ElementOffset; 1628 } 1629 } 1630 1631 DeadInsts.push_back(SI); 1632} 1633 1634/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to 1635/// an integer. Load the individual pieces to form the aggregate value. 1636void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI, 1637 SmallVector<AllocaInst*, 32> &NewElts) { 1638 // Extract each element out of the NewElts according to its structure offset 1639 // and form the result value. 1640 const Type *AllocaEltTy = AI->getAllocatedType(); 1641 uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy); 1642 1643 DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI 1644 << '\n'); 1645 1646 // There are two forms here: AI could be an array or struct. Both cases 1647 // have different ways to compute the element offset. 1648 const StructLayout *Layout = 0; 1649 uint64_t ArrayEltBitOffset = 0; 1650 if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) { 1651 Layout = TD->getStructLayout(EltSTy); 1652 } else { 1653 const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType(); 1654 ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy); 1655 } 1656 1657 Value *ResultVal = 1658 Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits)); 1659 1660 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { 1661 // Load the value from the alloca. If the NewElt is an aggregate, cast 1662 // the pointer to an integer of the same size before doing the load. 1663 Value *SrcField = NewElts[i]; 1664 const Type *FieldTy = 1665 cast<PointerType>(SrcField->getType())->getElementType(); 1666 uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy); 1667 1668 // Ignore zero sized fields like {}, they obviously contain no data. 1669 if (FieldSizeBits == 0) continue; 1670 1671 const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(), 1672 FieldSizeBits); 1673 if (!FieldTy->isIntegerTy() && !FieldTy->isFloatingPointTy() && 1674 !FieldTy->isVectorTy()) 1675 SrcField = new BitCastInst(SrcField, 1676 PointerType::getUnqual(FieldIntTy), 1677 "", LI); 1678 SrcField = new LoadInst(SrcField, "sroa.load.elt", LI); 1679 1680 // If SrcField is a fp or vector of the right size but that isn't an 1681 // integer type, bitcast to an integer so we can shift it. 1682 if (SrcField->getType() != FieldIntTy) 1683 SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI); 1684 1685 // Zero extend the field to be the same size as the final alloca so that 1686 // we can shift and insert it. 1687 if (SrcField->getType() != ResultVal->getType()) 1688 SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI); 1689 1690 // Determine the number of bits to shift SrcField. 1691 uint64_t Shift; 1692 if (Layout) // Struct case. 1693 Shift = Layout->getElementOffsetInBits(i); 1694 else // Array case. 1695 Shift = i*ArrayEltBitOffset; 1696 1697 if (TD->isBigEndian()) 1698 Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth(); 1699 1700 if (Shift) { 1701 Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift); 1702 SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI); 1703 } 1704 1705 // Don't create an 'or x, 0' on the first iteration. 1706 if (!isa<Constant>(ResultVal) || 1707 !cast<Constant>(ResultVal)->isNullValue()) 1708 ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI); 1709 else 1710 ResultVal = SrcField; 1711 } 1712 1713 // Handle tail padding by truncating the result 1714 if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits) 1715 ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI); 1716 1717 LI->replaceAllUsesWith(ResultVal); 1718 DeadInsts.push_back(LI); 1719} 1720 1721/// HasPadding - Return true if the specified type has any structure or 1722/// alignment padding in between the elements that would be split apart 1723/// by SROA; return false otherwise. 1724static bool HasPadding(const Type *Ty, const TargetData &TD) { 1725 if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 1726 Ty = ATy->getElementType(); 1727 return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty); 1728 } 1729 1730 // SROA currently handles only Arrays and Structs. 1731 const StructType *STy = cast<StructType>(Ty); 1732 const StructLayout *SL = TD.getStructLayout(STy); 1733 unsigned PrevFieldBitOffset = 0; 1734 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1735 unsigned FieldBitOffset = SL->getElementOffsetInBits(i); 1736 1737 // Check to see if there is any padding between this element and the 1738 // previous one. 1739 if (i) { 1740 unsigned PrevFieldEnd = 1741 PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1)); 1742 if (PrevFieldEnd < FieldBitOffset) 1743 return true; 1744 } 1745 PrevFieldBitOffset = FieldBitOffset; 1746 } 1747 // Check for tail padding. 1748 if (unsigned EltCount = STy->getNumElements()) { 1749 unsigned PrevFieldEnd = PrevFieldBitOffset + 1750 TD.getTypeSizeInBits(STy->getElementType(EltCount-1)); 1751 if (PrevFieldEnd < SL->getSizeInBits()) 1752 return true; 1753 } 1754 return false; 1755} 1756 1757/// isSafeStructAllocaToScalarRepl - Check to see if the specified allocation of 1758/// an aggregate can be broken down into elements. Return 0 if not, 3 if safe, 1759/// or 1 if safe after canonicalization has been performed. 1760bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) { 1761 // Loop over the use list of the alloca. We can only transform it if all of 1762 // the users are safe to transform. 1763 AllocaInfo Info; 1764 1765 isSafeForScalarRepl(AI, AI, 0, Info); 1766 if (Info.isUnsafe) { 1767 DEBUG(dbgs() << "Cannot transform: " << *AI << '\n'); 1768 return false; 1769 } 1770 1771 // Okay, we know all the users are promotable. If the aggregate is a memcpy 1772 // source and destination, we have to be careful. In particular, the memcpy 1773 // could be moving around elements that live in structure padding of the LLVM 1774 // types, but may actually be used. In these cases, we refuse to promote the 1775 // struct. 1776 if (Info.isMemCpySrc && Info.isMemCpyDst && 1777 HasPadding(AI->getAllocatedType(), *TD)) 1778 return false; 1779 1780 return true; 1781} 1782 1783 1784 1785/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to 1786/// some part of a constant global variable. This intentionally only accepts 1787/// constant expressions because we don't can't rewrite arbitrary instructions. 1788static bool PointsToConstantGlobal(Value *V) { 1789 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) 1790 return GV->isConstant(); 1791 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) 1792 if (CE->getOpcode() == Instruction::BitCast || 1793 CE->getOpcode() == Instruction::GetElementPtr) 1794 return PointsToConstantGlobal(CE->getOperand(0)); 1795 return false; 1796} 1797 1798/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived) 1799/// pointer to an alloca. Ignore any reads of the pointer, return false if we 1800/// see any stores or other unknown uses. If we see pointer arithmetic, keep 1801/// track of whether it moves the pointer (with isOffset) but otherwise traverse 1802/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to 1803/// the alloca, and if the source pointer is a pointer to a constant global, we 1804/// can optimize this. 1805static bool isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy, 1806 bool isOffset) { 1807 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) { 1808 User *U = cast<Instruction>(*UI); 1809 1810 if (LoadInst *LI = dyn_cast<LoadInst>(U)) { 1811 // Ignore non-volatile loads, they are always ok. 1812 if (LI->isVolatile()) return false; 1813 continue; 1814 } 1815 1816 if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) { 1817 // If uses of the bitcast are ok, we are ok. 1818 if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset)) 1819 return false; 1820 continue; 1821 } 1822 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { 1823 // If the GEP has all zero indices, it doesn't offset the pointer. If it 1824 // doesn't, it does. 1825 if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy, 1826 isOffset || !GEP->hasAllZeroIndices())) 1827 return false; 1828 continue; 1829 } 1830 1831 if (CallSite CS = U) { 1832 // If this is a readonly/readnone call site, then we know it is just a 1833 // load and we can ignore it. 1834 if (CS.onlyReadsMemory()) 1835 continue; 1836 1837 // If this is the function being called then we treat it like a load and 1838 // ignore it. 1839 if (CS.isCallee(UI)) 1840 continue; 1841 1842 // If this is being passed as a byval argument, the caller is making a 1843 // copy, so it is only a read of the alloca. 1844 unsigned ArgNo = CS.getArgumentNo(UI); 1845 if (CS.paramHasAttr(ArgNo+1, Attribute::ByVal)) 1846 continue; 1847 } 1848 1849 // If this is isn't our memcpy/memmove, reject it as something we can't 1850 // handle. 1851 MemTransferInst *MI = dyn_cast<MemTransferInst>(U); 1852 if (MI == 0) 1853 return false; 1854 1855 // If the transfer is using the alloca as a source of the transfer, then 1856 // ignore it since it is a load (unless the transfer is volatile). 1857 if (UI.getOperandNo() == 1) { 1858 if (MI->isVolatile()) return false; 1859 continue; 1860 } 1861 1862 // If we already have seen a copy, reject the second one. 1863 if (TheCopy) return false; 1864 1865 // If the pointer has been offset from the start of the alloca, we can't 1866 // safely handle this. 1867 if (isOffset) return false; 1868 1869 // If the memintrinsic isn't using the alloca as the dest, reject it. 1870 if (UI.getOperandNo() != 0) return false; 1871 1872 // If the source of the memcpy/move is not a constant global, reject it. 1873 if (!PointsToConstantGlobal(MI->getSource())) 1874 return false; 1875 1876 // Otherwise, the transform is safe. Remember the copy instruction. 1877 TheCopy = MI; 1878 } 1879 return true; 1880} 1881 1882/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only 1883/// modified by a copy from a constant global. If we can prove this, we can 1884/// replace any uses of the alloca with uses of the global directly. 1885MemTransferInst *SROA::isOnlyCopiedFromConstantGlobal(AllocaInst *AI) { 1886 MemTransferInst *TheCopy = 0; 1887 if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false)) 1888 return TheCopy; 1889 return 0; 1890} 1891