ScalarReplAggregates.cpp revision 9bc67da0a9982f2f7597d1d46cf18f079e4f8f98
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// they often interact, especially for C++ programs.  As such, iterating
// between SRoA, then Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//
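
// (Illustrative sketch, not from the original file; all value names are
// hypothetical.)  Given an aggregate alloca such as:
//
//   %p = alloca { i32, i32 }
//   %f = getelementptr { i32, i32 }* %p, i32 0, i32 0
//   store i32 1, i32* %f
//
// scalar replacement first splits the alloca into one alloca per member:
//
//   %p.0 = alloca i32
//   %p.1 = alloca i32
//   store i32 1, i32* %p.0
//
// and mem2reg can then typically promote %p.0 and %p.1 into SSA values.
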
#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

namespace {
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    static char ID; // Pass identification, replacement for typeid
    explicit SROA(signed T = -1) : FunctionPass(&ID) {
      if (T == -1)
        SRThreshold = 128;
      else
        SRThreshold = T;
    }

    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass does not require any passes, but we know it
    // will not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    TargetData *TD;

    /// AllocaInfo - When analyzing uses of an alloca instruction, this captures
    /// information about the uses.  All these fields are initialized to false
    /// and set to true when something is learned.
    struct AllocaInfo {
      /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
      bool isUnsafe : 1;

      /// needsCanon - This is set to true if there is some use of the alloca
      /// that requires canonicalization.
      bool needsCanon : 1;

      /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
      bool isMemCpySrc : 1;

      /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
      bool isMemCpyDst : 1;

      AllocaInfo()
        : isUnsafe(false), needsCanon(false),
          isMemCpySrc(false), isMemCpyDst(false) {}
    };

    unsigned SRThreshold;

    void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

    int isSafeAllocaToScalarRepl(AllocationInst *AI);

    void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                               AllocaInfo &Info);
    void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                          AllocaInfo &Info);
    void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                        unsigned OpNo, AllocaInfo &Info);
    void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
                                        AllocaInfo &Info);

    void DoScalarReplacement(AllocationInst *AI,
                             std::vector<AllocationInst*> &WorkList);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                      AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI,
                                       SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);

    bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                            bool &SawVec, uint64_t Offset, unsigned AllocaSize);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
    Value *ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
                                     uint64_t Offset, IRBuilder<> &Builder);
    Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
                                     uint64_t Offset, IRBuilder<> &Builder);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
  };
}

char SROA::ID = 0;
static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}
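
// (Illustrative sketch, not from the original file; the driver code and the
// layout string are hypothetical.)  The pass would typically be scheduled
// through a pass manager, roughly:
//
//   PassManager PM;
//   PM.add(new TargetData("e-p:32:32"));        // made-up layout string
//   PM.add(createScalarReplAggregatesPass());   // default SRThreshold of 128
//   PM.run(M);                                  // M is some llvm::Module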

bool SROA::runOnFunction(Function &F) {
  TD = &getAnalysis<TargetData>();

  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}
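
// (Illustrative sketch, not from the original file; names are hypothetical.)
// Promotion turns a scalar alloca whose only uses are simple loads and
// stores, e.g.:
//
//   %x = alloca i32
//   store i32 %v, i32* %x
//   %r = load i32* %x
//
// into pure SSA form: every use of %r is rewritten to use %v directly and
// the alloca disappears.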

/// getNumSAElements - Return the number of elements in the specified struct
/// or array.
static uint64_t getNumSAElements(const Type *T) {
  if (const StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements();
  return cast<ArrayType>(T)->getNumElements();
}

// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function,
// removing them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any allocas and mallocs to the
  // worklist.
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // If this alloca is impossible for us to promote, reject it early.
    if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
      continue;

    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global.  If this is the case, we can change all users
    // to use the constant global instead.  This is commonly produced by the
    // CFE by constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }"
    // if 'A' is only subsequently read.
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DOUT << "Found alloca equal to global: " << *AI;
      DOUT << "  memcpy = " << *TheCopy;
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    uint64_t AllocaSize = TD->getTypePaddedSize(AI->getAllocatedType());

    if ((isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType())) &&
        // Do not promote any struct whose size is too big.
        AllocaSize < SRThreshold &&
        // Do not promote any struct into more than "32" separate vars.
        getNumSAElements(AI->getAllocatedType()) < SRThreshold/4) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: assert(0 && "Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CanonicalizeAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // If we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register value, do so.
    // IsNotTrivial tracks whether this is something that mem2reg could have
    // promoted itself.  If so, we don't want to transform it needlessly.  Note
    // that we can't just check based on the type: the alloca may be of an i32
    // but that has pointer arithmetic to set byte 3 of it or something.
    bool IsNotTrivial = false;
    const Type *VectorTy = 0;
    bool HadAVector = false;
    if (CanConvertToScalar(AI, IsNotTrivial, VectorTy, HadAVector,
                           0, unsigned(AllocaSize)) && IsNotTrivial) {
      AllocaInst *NewAI;
      // If we were able to find a vector type that can handle this with
      // insert/extract elements, and if there was at least one use that had
      // a vector type, promote this to a vector.  We don't want to promote
      // random stuff that doesn't use vectors (e.g. <9 x double>) because then
      // we just get a lot of insert/extracts.  If at least one vector is
      // involved, then we probably really do have a union of vector/array.
      if (VectorTy && isa<VectorType>(VectorTy) && HadAVector) {
        DOUT << "CONVERT TO VECTOR: " << *AI << "  TYPE = " << *VectorTy <<"\n";

        // Create and insert the vector alloca.
        NewAI = new AllocaInst(VectorTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      } else {
        DOUT << "CONVERT TO SCALAR INTEGER: " << *AI << "\n";

        // Create and insert the integer alloca.
        const Type *NewTy = IntegerType::get(AllocaSize*8);
        NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      }
      NewAI->takeName(AI);
      AI->eraseFromParent();
      ++NumConverted;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this alloca.
  }

  return Changed;
}
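
// (Illustrative sketch, not from the original file; names are hypothetical.)
// The constant-global path above rewrites code like:
//
//   @C = constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
//   ...
//   %A = alloca [4 x i32]
//   %A.cast = bitcast [4 x i32]* %A to i8*
//   call void @llvm.memcpy.i32(i8* %A.cast,
//                              i8* bitcast ([4 x i32]* @C to i8*),
//                              i32 16, i32 4)
//
// so that all readers of %A use @C directly, and the memcpy and the alloca
// are both deleted.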

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate, do SROA now.
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DOUT << "Found inst to SROA: " << *AI;
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
  //
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    // Replace:
    //   %res = load { i32, i32 }* %alloc
    // with:
    //   %load.0 = load i32* %alloc.0
    //   %insert.0 = insertvalue { i32, i32 } zeroinitializer, i32 %load.0, 0
    //   %load.1 = load i32* %alloc.1
    //   %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
    // (Also works for arrays instead of structs)
    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *Insert = UndefValue::get(LI->getType());
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Load = new LoadInst(ElementAllocas[i], "load", LI);
        Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);
      }
      LI->replaceAllUsesWith(Insert);
      LI->eraseFromParent();
      continue;
    }

    // Replace:
    //   store { i32, i32 } %val, { i32, i32 }* %alloc
    // with:
    //   %val.0 = extractvalue { i32, i32 } %val, 0
    //   store i32 %val.0, i32* %alloc.0
    //   %val.1 = extractvalue { i32, i32 } %val, 1
    //   store i32 %val.1, i32* %alloc.1
    // (Also works for arrays instead of structs)
    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI);
        new StoreInst(Extract, ElementAllocas[i], SI);
      }
      SI->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
      (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelementptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      //
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(),
                                           NewArgs.end(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0 && GEPI->hasAllZeroIndices())
      RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);

    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  NumReplaced++;
}
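
// (Illustrative sketch, not from the original file; names are hypothetical.)
// A constant-index GEP into the old aggregate, such as:
//
//   %e = getelementptr { i32, float }* %p, i32 0, i32 1
//   %v = load float* %e
//
// is rewritten by the loop above to use the element alloca directly:
//
//   %v = load float* %p.1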
442/// 443void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI, 444 AllocaInfo &Info) { 445 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 446 I != E; ++I) { 447 Instruction *User = cast<Instruction>(*I); 448 switch (User->getOpcode()) { 449 case Instruction::Load: break; 450 case Instruction::Store: 451 // Store is ok if storing INTO the pointer, not storing the pointer 452 if (User->getOperand(0) == Ptr) return MarkUnsafe(Info); 453 break; 454 case Instruction::GetElementPtr: { 455 GetElementPtrInst *GEP = cast<GetElementPtrInst>(User); 456 bool AreAllZeroIndices = isFirstElt; 457 if (GEP->getNumOperands() > 1) { 458 if (!isa<ConstantInt>(GEP->getOperand(1)) || 459 !cast<ConstantInt>(GEP->getOperand(1))->isZero()) 460 // Using pointer arithmetic to navigate the array. 461 return MarkUnsafe(Info); 462 463 if (AreAllZeroIndices) 464 AreAllZeroIndices = GEP->hasAllZeroIndices(); 465 } 466 isSafeElementUse(GEP, AreAllZeroIndices, AI, Info); 467 if (Info.isUnsafe) return; 468 break; 469 } 470 case Instruction::BitCast: 471 if (isFirstElt) { 472 isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info); 473 if (Info.isUnsafe) return; 474 break; 475 } 476 DOUT << " Transformation preventing inst: " << *User; 477 return MarkUnsafe(Info); 478 case Instruction::Call: 479 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { 480 if (isFirstElt) { 481 isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info); 482 if (Info.isUnsafe) return; 483 break; 484 } 485 } 486 DOUT << " Transformation preventing inst: " << *User; 487 return MarkUnsafe(Info); 488 default: 489 DOUT << " Transformation preventing inst: " << *User; 490 return MarkUnsafe(Info); 491 } 492 } 493 return; // All users look ok :) 494} 495 496/// AllUsersAreLoads - Return true if all users of this value are loads. 497static bool AllUsersAreLoads(Value *Ptr) { 498 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 499 I != E; ++I) 500 if (cast<Instruction>(*I)->getOpcode() != Instruction::Load) 501 return false; 502 return true; 503} 504 505/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an 506/// aggregate allocation. 507/// 508void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI, 509 AllocaInfo &Info) { 510 if (BitCastInst *C = dyn_cast<BitCastInst>(User)) 511 return isSafeUseOfBitCastedAllocation(C, AI, Info); 512 513 if (LoadInst *LI = dyn_cast<LoadInst>(User)) 514 if (!LI->isVolatile()) 515 return;// Loads (returning a first class aggregrate) are always rewritable 516 517 if (StoreInst *SI = dyn_cast<StoreInst>(User)) 518 if (!SI->isVolatile() && SI->getOperand(0) != AI) 519 return;// Store is ok if storing INTO the pointer, not storing the pointer 520 521 GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User); 522 if (GEPI == 0) 523 return MarkUnsafe(Info); 524 525 gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI); 526 527 // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>". 528 if (I == E || 529 I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) { 530 return MarkUnsafe(Info); 531 } 532 533 ++I; 534 if (I == E) return MarkUnsafe(Info); // ran out of GEP indices?? 535 536 bool IsAllZeroIndices = true; 537 538 // If the first index is a non-constant index into an array, see if we can 539 // handle it as a special case. 

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                                 AllocaInfo &Info) {
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI, Info);

  if (LoadInst *LI = dyn_cast<LoadInst>(User))
    if (!LI->isVolatile())
      return;  // Loads (returning a first class aggregate) are always rewritable

  if (StoreInst *SI = dyn_cast<StoreInst>(User))
    if (!SI->isVolatile() && SI->getOperand(0) != AI)
      return;  // Store is ok if storing INTO the pointer, not storing the pointer

  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User);
  if (GEPI == 0)
    return MarkUnsafe(Info);

  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) {
    return MarkUnsafe(Info);
  }

  ++I;
  if (I == E) return MarkUnsafe(Info);  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If the first index is a non-constant index into an array, see if we can
  // handle it as a special case.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    if (!isa<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices = 0;
      uint64_t NumElements = AT->getNumElements();

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements
      // in it, in which case we CAN promote it, but we have to canonicalize
      // this out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI)) {
        Info.needsCanon = true;
        return;  // Canonicalization required!
      }
      return MarkUnsafe(Info);
    }
  }

  // Walk through the GEP type indices, checking the types that this indexes
  // into.
  for (; I != E; ++I) {
    // Ignore struct elements, no extra checking needed for these.
    if (isa<StructType>(*I))
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
    if (!IdxVal) return MarkUnsafe(Info);

    // Are all indices still zero?
    IsAllZeroIndices &= IdxVal->isZero();

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      // This GEP indexes an array.  Verify that this is an in-range constant
      // integer.  Specifically, consider A[0][i].  We cannot know that the
      // user isn't doing invalid things like allowing i to index an
      // out-of-range subscript that accesses A[1].  Because of this, we have
      // to reject SROA of any accesses into structs where any of the
      // components are variables.
      if (IdxVal->getZExtValue() >= AT->getNumElements())
        return MarkUnsafe(Info);
    } else if (const VectorType *VT = dyn_cast<VectorType>(*I)) {
      if (IdxVal->getZExtValue() >= VT->getNumElements())
        return MarkUnsafe(Info);
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to
  // reject them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info);
}
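
// (Illustrative note, not from the original file.)  Under these rules a use
// like "getelementptr { i32, i32 }* %p, i32 0, i32 1" is safe, while
// "getelementptr { i32, i32 }* %p, i32 1" is rejected: its non-zero first
// index is pointer arithmetic past the alloca rather than an element access.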

/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory
/// intrinsic can be promoted by SROA.  At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                          unsigned OpNo, AllocaInfo &Info) {
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return MarkUnsafe(Info);

  // If not the whole aggregate, give up.
  if (Length->getZExtValue() !=
      TD->getTypePaddedSize(AI->getType()->getElementType()))
    return MarkUnsafe(Info);

  // We only know about memcpy/memset/memmove.
  if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
    return MarkUnsafe(Info);

  // Otherwise, we can transform it.  Determine whether this is a memcpy/set
  // into or out of the aggregate.
  if (OpNo == 1)
    Info.isMemCpyDst = true;
  else {
    assert(OpNo == 2);
    Info.isMemCpySrc = true;
  }
}

/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
/// are safe to transform.
void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
                                          AllocaInfo &Info) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      isSafeUseOfBitCastedAllocation(BCU, AI, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      if (SI->isVolatile())
        return MarkUnsafe(Info);

      // If storing the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and store through it.  This is similar to
      // the memcpy case and occurs in various "byval" cases and emulated
      // memcpys.
      if (isa<IntegerType>(SI->getOperand(0)->getType()) &&
          TD->getTypePaddedSize(SI->getOperand(0)->getType()) ==
          TD->getTypePaddedSize(AI->getType()->getElementType())) {
        Info.isMemCpyDst = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
      if (LI->isVolatile())
        return MarkUnsafe(Info);

      // If loading the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and load through it.  This is similar to the
      // memcpy case and occurs in various "byval" cases and emulated memcpys.
      if (isa<IntegerType>(LI->getType()) &&
          TD->getTypePaddedSize(LI->getType()) ==
          TD->getTypePaddedSize(AI->getType()->getElementType())) {
        Info.isMemCpySrc = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else {
      return MarkUnsafe(Info);
    }
    if (Info.isUnsafe) return;
  }
}
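
// (Illustrative sketch, not from the original file; names are hypothetical.)
// The whole-alloca case accepted above looks like:
//
//   %p = alloca { i32, i32 }
//   %ip = bitcast { i32, i32 }* %p to i64*
//   store i64 %v, i64* %ip     ; treated like a memcpy into the aggregate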

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    Instruction *User = cast<Instruction>(*UI++);
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      if (BCU->use_empty()) BCU->eraseFromParent();
      continue;
    }

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      // This must be memcpy/memmove/memset of the entire aggregate.
      // Split into one per element.
      RewriteMemIntrinUserOfAlloca(MI, BCInst, AI, NewElts);
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // If this is a store of the entire alloca from an integer, rewrite it.
      RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
      continue;
    }

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // If this is a load of the entire alloca to an integer, rewrite it.
      RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
      continue;
    }

    // Otherwise it must be some other user of a gep of the first pointer.
    // Just leave these alone.
    continue;
  }
}

/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                        AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {

  // If this is a memcpy/memmove, construct the other pointer as the
  // appropriate type.
  Value *OtherPtr = 0;
  if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
    if (BCInst == MCI->getRawDest())
      OtherPtr = MCI->getRawSource();
    else {
      assert(BCInst == MCI->getRawSource());
      OtherPtr = MCI->getRawDest();
    }
  } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
    if (BCInst == MMI->getRawDest())
      OtherPtr = MMI->getRawSource();
    else {
      assert(BCInst == MMI->getRawSource());
      OtherPtr = MMI->getRawDest();
    }
  }

  // If there is an other pointer, we want to convert it to the same pointer
  // type as AI has, so we can GEP through it safely.
  if (OtherPtr) {
    // It is likely that OtherPtr is a bitcast, if so, remove it.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
      OtherPtr = BC->getOperand(0);
    // All zero GEPs are effectively bitcasts.
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr))
      if (GEP->hasAllZeroIndices())
        OtherPtr = GEP->getOperand(0);

    if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
      if (BCE->getOpcode() == Instruction::BitCast)
        OtherPtr = BCE->getOperand(0);

    // If the pointer is not the right type, insert a bitcast to the right
    // type.
    if (OtherPtr->getType() != AI->getType())
      OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                 MI);
  }

  // Process each element of the aggregate.
  Value *TheFn = MI->getOperand(0);
  const Type *BytePtrTy = MI->getRawDest()->getType();
  bool SROADest = MI->getRawDest() == BCInst;

  Constant *Zero = Constant::getNullValue(Type::Int32Ty);

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // If this is a memcpy/memmove, emit a GEP of the other element address.
    Value *OtherElt = 0;
    if (OtherPtr) {
      Value *Idx[2] = { Zero, ConstantInt::get(Type::Int32Ty, i) };
      OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
                                           OtherPtr->getNameStr()+"."+utostr(i),
                                           MI);
    }

    Value *EltPtr = NewElts[i];
    const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

    // If we got down to a scalar, insert a load or store as appropriate.
    if (EltTy->isSingleValueType()) {
      if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
        Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp", MI);
        new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
        continue;
      }
      assert(isa<MemSetInst>(MI));

      // If the stored element is zero (common case), just store a null
      // constant.
      Constant *StoreVal;
      if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
        if (CI->isZero()) {
          StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
        } else {
          // If EltTy is a vector type, get the element type.
          const Type *ValTy = EltTy;
          if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
            ValTy = VTy->getElementType();

          // Construct an integer with the right value.
          unsigned EltSize = TD->getTypeSizeInBits(ValTy);
          APInt OneVal(EltSize, CI->getZExtValue());
          APInt TotalVal(OneVal);
          // Set each byte.
          for (unsigned i = 0; 8*i < EltSize; ++i) {
            TotalVal = TotalVal.shl(8);
            TotalVal |= OneVal;
          }

          // Convert the integer value to the appropriate type.
          StoreVal = ConstantInt::get(TotalVal);
          if (isa<PointerType>(ValTy))
            StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
          else if (ValTy->isFloatingPoint())
            StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
          assert(StoreVal->getType() == ValTy && "Type mismatch!");

          // If the requested value was a vector constant, create it.
          if (EltTy != ValTy) {
            unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
            SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
            StoreVal = ConstantVector::get(&Elts[0], NumElts);
          }
        }
        new StoreInst(StoreVal, EltPtr, MI);
        continue;
      }
      // Otherwise, if we're storing a byte variable, use a memset call for
      // this element.
    }

    // Cast the element pointer to BytePtrTy.
    if (EltPtr->getType() != BytePtrTy)
      EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

    // Cast the other pointer (if we have one) to BytePtrTy.
    if (OtherElt && OtherElt->getType() != BytePtrTy)
      OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                 MI);

    unsigned EltSize = TD->getTypePaddedSize(EltTy);

    // Finally, insert the meminst for this element.
    if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
      Value *Ops[] = {
        SROADest ? EltPtr : OtherElt,  // Dest ptr
        SROADest ? OtherElt : EltPtr,  // Src ptr
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
        Zero  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    } else {
      assert(isa<MemSetInst>(MI));
      Value *Ops[] = {
        EltPtr, MI->getOperand(2),  // Dest, Value,
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
        Zero  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    }
  }
  MI->eraseFromParent();
}
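
// (Illustrative sketch, not from the original file; names are hypothetical.)
// A whole-aggregate copy over { i32, i32 }, such as:
//
//   call void @llvm.memcpy.i32(i8* %dst, i8* %src, i32 8, i32 4)
//
// is rewritten into one transfer per element, using plain load/store for
// scalar elements:
//
//   %tmp = load i32* %src.0
//   store i32 %tmp, i32* %dst.0
//   %tmp1 = load i32* %src.1
//   store i32 %tmp1, i32* %dst.1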

/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation.  Extract out the pieces of the stored
/// integer and store them individually.
void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
                                         AllocationInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts){
  // Extract each element out of the integer according to its structure offset
  // and store the element value to the individual alloca.
  Value *SrcVal = SI->getOperand(0);
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);

  // If this isn't a store of an integer to the whole alloca, it may be a store
  // to the first element.  Just ignore the store in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(SrcVal->getType()) ||
      TD->getTypePaddedSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    return;

  DOUT << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << *SI;

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    const StructLayout *Layout = TD->getStructLayout(EltSTy);

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Get the number of bits to shift SrcVal to get the value.
      const Type *FieldTy = EltSTy->getElementType(i);
      uint64_t Shift = Layout->getElementOffsetInBits(i);

      if (TD->isBigEndian())
        Shift = AllocaSizeBits-Shift-TD->getTypePaddedSizeInBits(FieldTy);

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

      // Ignore zero sized fields like {}, they obviously contain no data.
      if (FieldSizeBits == 0) continue;

      if (FieldSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal, IntegerType::get(FieldSizeBits), "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == FieldTy) {
        // Storing to an integer field of this size, just do it.
      } else if (FieldTy->isFloatingPoint() || isa<VectorType>(FieldTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);
    }

  } else {
    const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
    const Type *ArrayEltTy = ATy->getElementType();
    uint64_t ElementOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
    uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);

    uint64_t Shift;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-ElementOffset;
    else
      Shift = 0;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Ignore zero sized fields like {}, they obviously contain no data.
      if (ElementSizeBits == 0) continue;

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      if (ElementSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal, IntegerType::get(ElementSizeBits),
                               "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == ArrayEltTy) {
        // Storing to an integer field of this size, just do it.
      } else if (ArrayEltTy->isFloatingPoint() ||
                 isa<VectorType>(ArrayEltTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);

      if (TD->isBigEndian())
        Shift -= ElementOffset;
      else
        Shift += ElementOffset;
    }
  }

  SI->eraseFromParent();
}
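
// (Illustrative sketch, not from the original file; names are hypothetical,
// little-endian layout assumed.)  Storing i64 %v over an alloca of
// { i32, i32 } becomes per-field shifts, truncations and stores:
//
//   %e0 = trunc i64 %v to i32
//   store i32 %e0, i32* %p.0
//   %s1 = lshr i64 %v, 32
//   %e1 = trunc i64 %s1 to i32
//   store i32 %e1, i32* %p.1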

/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer.  Load the individual pieces to form the aggregate value.
void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // Extract each element out of the NewElts according to its structure offset
  // and form the result value.
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);

  // If this isn't a load of the whole alloca to an integer, it may be a load
  // of the first element.  Just ignore the load in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(LI->getType()) ||
      TD->getTypePaddedSizeInBits(LI->getType()) != AllocaSizeBits)
    return;

  DOUT << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << *LI;

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  const StructLayout *Layout = 0;
  uint64_t ArrayEltBitOffset = 0;
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    Layout = TD->getStructLayout(EltSTy);
  } else {
    const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
    ArrayEltBitOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
  }

  Value *ResultVal = Constant::getNullValue(LI->getType());

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // Load the value from the alloca.  If the NewElt is an aggregate, cast
    // the pointer to an integer of the same size before doing the load.
    Value *SrcField = NewElts[i];
    const Type *FieldTy =
      cast<PointerType>(SrcField->getType())->getElementType();
    uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

    // Ignore zero sized fields like {}, they obviously contain no data.
    if (FieldSizeBits == 0) continue;

    const IntegerType *FieldIntTy = IntegerType::get(FieldSizeBits);
    if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
        !isa<VectorType>(FieldTy))
      SrcField = new BitCastInst(SrcField, PointerType::getUnqual(FieldIntTy),
                                 "", LI);
    SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

    // If SrcField is a fp or vector of the right size but that isn't an
    // integer type, bitcast to an integer so we can shift it.
    if (SrcField->getType() != FieldIntTy)
      SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);

    // Zero extend the field to be the same size as the final alloca so that
    // we can shift and insert it.
    if (SrcField->getType() != ResultVal->getType())
      SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);

    // Determine the number of bits to shift SrcField.
    uint64_t Shift;
    if (Layout)  // Struct case.
      Shift = Layout->getElementOffsetInBits(i);
    else  // Array case.
      Shift = i*ArrayEltBitOffset;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();

    if (Shift) {
      Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
      SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
    }

    ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
  }

  LI->replaceAllUsesWith(ResultVal);
  LI->eraseFromParent();
}
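
// (Illustrative sketch, not from the original file; names are hypothetical,
// little-endian layout assumed.)  Loading i64 from an alloca of { i32, i32 }
// is reassembled from the element allocas:
//
//   %e0 = load i32* %p.0
//   %z0 = zext i32 %e0 to i64
//   %e1 = load i32* %p.1
//   %z1 = zext i32 %e1 to i64
//   %s1 = shl i64 %z1, 32
//   %r  = or i64 %s1, %z0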

/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
static bool HasPadding(const Type *Ty, const TargetData &TD) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    unsigned PrevFieldBitOffset = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

      // Padding in sub-elements?
      if (HasPadding(STy->getElementType(i), TD))
        return true;

      // Check to see if there is any padding between this element and the
      // previous one.
      if (i) {
        unsigned PrevFieldEnd =
          PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
        if (PrevFieldEnd < FieldBitOffset)
          return true;
      }

      PrevFieldBitOffset = FieldBitOffset;
    }

    // Check for tail padding.
    if (unsigned EltCount = STy->getNumElements()) {
      unsigned PrevFieldEnd = PrevFieldBitOffset +
        TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
      if (PrevFieldEnd < SL->getSizeInBits())
        return true;
    }

  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    return HasPadding(ATy->getElementType(), TD);
  } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    return HasPadding(VTy->getElementType(), TD);
  }
  return TD.getTypeSizeInBits(Ty) != TD.getTypePaddedSizeInBits(Ty);
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  AllocaInfo Info;

  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info);
    if (Info.isUnsafe) {
      DOUT << "Cannot transform: " << *AI << "  due to user: " << **I;
      return 0;
    }
  }

  // Okay, we know all the users are promotable.  If the aggregate is a memcpy
  // source and destination, we have to be careful.  In particular, the memcpy
  // could be moving around elements that live in structure padding of the LLVM
  // types, but may actually be used.  In these cases, we refuse to promote the
  // struct.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getType()->getElementType(), *TD))
    return 0;

  // If we require cleanup, return 1, otherwise return 3.
  return Info.needsCanon ? 1 : 3;
}

/// CanonicalizeAllocaUsers - If SROA reported that it can promote the
/// specified allocation, but only if cleaned up, perform the cleanups
/// required.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI++);
    if (!GEPI) continue;
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP,
          // insert two loads of the appropriate indexed GEP and select
          // between them.
          Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                                      "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::Int32Ty);
          Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                                     Indices.begin(),
                                                     Indices.end(),
                                                     GEPI->getName()+".0",
                                                     GEPI);
          Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
          Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                                    Indices.begin(),
                                                    Indices.end(),
                                                    GEPI->getName()+".1",
                                                    GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}
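
// (Illustrative sketch, not from the original file; names are hypothetical.)
// For a two-element array, a load through a variable-index GEP:
//
//   %g = getelementptr [2 x i32]* %A, i32 0, i32 %i
//   %v = load i32* %g
//
// is canonicalized into two constant-index loads and a select:
//
//   %isone = icmp ne i32 %i, 0
//   %v.0 = load i32* %g.0       ; GEP with constant index 0
//   %v.1 = load i32* %g.1       ; GEP with constant index 1
//   %v = select i1 %isone, i32 %v.1, i32 %v.0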

/// MergeInType - Add the 'In' type to the accumulated type (Accum) so far at
/// the offset specified by Offset (which is specified in bytes).
///
/// There are two cases we handle here:
///   1) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///      This promotes a <4 x float> with a store of float to the third element
///      into a <4 x float> that uses insert element.
///   2) A fully general blob of memory, which we turn into some (potentially
///      large) integer type with extract and insert operations where the loads
///      and stores would mutate the memory.
static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
                        unsigned AllocaSize, const TargetData &TD) {
  // If this could be contributing to a vector, analyze it.
  if (VecTy != Type::VoidTy) { // either null or a vector type.

    // If the In type is a vector that is the same size as the alloca, see if
    // it matches the existing VecTy.
    if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
      if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
        // If we're storing/loading a vector of the right size, allow it as a
        // vector.  If this is the first vector we see, remember the type so
        // that we know the element size.
        if (VecTy == 0)
          VecTy = VInTy;
        return;
      }
    } else if (In == Type::FloatTy || In == Type::DoubleTy ||
               (isa<IntegerType>(In) && In->getPrimitiveSizeInBits() >= 8 &&
                isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
      // If we're accessing something that could be an element of a vector,
      // see if the implied vector agrees with what we already have and if
      // Offset is compatible with it.
      unsigned EltSize = In->getPrimitiveSizeInBits()/8;
      if (Offset % EltSize == 0 &&
          AllocaSize % EltSize == 0 &&
          (VecTy == 0 ||
           cast<VectorType>(VecTy)->getElementType()
                 ->getPrimitiveSizeInBits()/8 == EltSize)) {
        if (VecTy == 0)
          VecTy = VectorType::get(In, AllocaSize/EltSize);
        return;
      }
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized vector
  // form.  We can still turn this into a large integer.
  VecTy = Type::VoidTy;
}
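
// (Illustrative note, not from the original file.)  For example, a 16-byte
// alloca accessed once as <4 x float> and otherwise through float
// loads/stores at offsets 0, 4, 8 and 12 merges to VecTy = <4 x float>;
// mixing in a double access instead forces the fallback, VecTy = VoidTy,
// and the alloca is treated as a single large integer.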

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee and all
/// its accesses to use a single vector type, return true, and set VecTy to
/// the new type.  If we could convert the alloca into a single promotable
/// integer, return true but set VecTy to VoidTy.  Further, if the use is not a
/// completely trivial use that mem2reg could promote, set IsNotTrivial.
/// Offset is the current offset from the base of the alloca being analyzed.
///
/// If we see at least one access to the value as a vector type, set the
/// SawVec flag.
///
bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                              bool &SawVec, uint64_t Offset,
                              unsigned AllocaSize) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // Don't break volatile loads.
      if (LI->isVolatile())
        return false;
      MergeInType(LI->getType(), Offset, VecTy, AllocaSize, *TD);
      SawVec |= isa<VectorType>(LI->getType());
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V || SI->isVolatile()) return 0;
      MergeInType(SI->getOperand(0)->getType(), Offset, VecTy, AllocaSize,
                  *TD);
      SawVec |= isa<VectorType>(SI->getOperand(0)->getType());
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (!CanConvertToScalar(BCI, IsNotTrivial, VecTy, SawVec, Offset,
                              AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // If this is a GEP with variable indices, we can't handle it.
      if (!GEP->hasAllConstantIndices())
        return false;

      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      // See if all uses can be converted.
      if (!CanConvertToScalar(GEP, IsNotTrivial, VecTy, SawVec,
                              Offset+GEPOffset, AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // handle it.
    if (isa<MemSetInst>(User) &&
        // Store of constant value.
        isa<ConstantInt>(User->getOperand(2)) &&
        // Store with constant size.
        isa<ConstantInt>(User->getOperand(3))) {
      VecTy = Type::VoidTy;
      IsNotTrivial = true;
      continue;
    }

    // Otherwise, we cannot handle this!
    return false;
  }

  return true;
}

/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
      GEP->eraseFromParent();
      continue;
    }

    IRBuilder<> Builder(User->getParent(), User);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *LoadVal = ConvertUsesOfLoadToScalar(LI, NewAI, Offset, Builder);
      LI->replaceAllUsesWith(LoadVal);
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").c_str());
      Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
                                             Builder);
      Builder.CreateStore(New, NewAI);
      SI->eraseFromParent();
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // transform it into a store of the expanded constant value.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
      unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

      // Compute the value replicated the right number of times.
      APInt APVal(NumBytes*8, Val);

      // Splat the value if non-zero.
      if (Val)
        for (unsigned i = 1; i != NumBytes; ++i)
          APVal |= APVal << 8;

      Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").c_str());
      Value *New = ConvertScalar_InsertValue(ConstantInt::get(APVal), Old,
                                             Offset, Builder);
      Builder.CreateStore(New, NewAI);
      MSI->eraseFromParent();
      continue;
    }

    assert(0 && "Unsupported operation!");
    abort();
  }
}
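
// (Illustrative note, not from the original file.)  For the memset case
// above, a memset of four 0xAB bytes over an i32 alloca becomes a store of
// the splatted constant 0xABABABAB, merged into the scalar via
// ConvertScalar_InsertValue.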

  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = TD->getTypeSizeInBits(LI->getType());
  if (LIBitWidth < NTy->getBitWidth())
    NV = Builder.CreateTrunc(NV, IntegerType::get(LIBitWidth), "tmp");
  else if (LIBitWidth > NTy->getBitWidth())
    NV = Builder.CreateZExt(NV, IntegerType::get(LIBitWidth), "tmp");

  // If the result is an integer, this is a trunc or bitcast.
  if (isa<IntegerType>(LI->getType())) {
    // Should be done.
  } else if (LI->getType()->isFloatingPoint() ||
             isa<VectorType>(LI->getType())) {
    // Just do a bitcast, we know the sizes match up.
    NV = Builder.CreateBitCast(NV, LI->getType(), "tmp");
  } else {
    // Otherwise this must be a pointer.
    NV = Builder.CreateIntToPtr(NV, LI->getType(), "tmp");
  }
  assert(NV->getType() == LI->getType() && "Didn't convert right?");
  return NV;
}


/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
/// or vector value "Old" at the offset specified by Offset.
///
/// This happens when we are converting an "integer union" to a single integer
/// scalar, or when we are converting a "vector union" to a vector with
/// insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits, identifying where
/// SV should live within Old.
Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
                                       uint64_t Offset, IRBuilder<> &Builder) {

  // Convert the stored type to the actual type, shift it left to insert, then
  // 'or' it into place.
  const Type *AllocaType = Old->getType();

  if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    // If the result alloca is a vector type, this is either an element
    // access or a bitcast to another vector type.
    if (isa<VectorType>(SV->getType())) {
      SV = Builder.CreateBitCast(SV, AllocaType, "tmp");
    } else {
      // Must be an element insertion.
      unsigned Elt = Offset/TD->getTypePaddedSizeInBits(VTy->getElementType());

      if (SV->getType() != VTy->getElementType())
        SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");

      SV = Builder.CreateInsertElement(Old, SV,
                                       ConstantInt::get(Type::Int32Ty, Elt),
                                       "tmp");
    }
    return SV;
  }

  // If SV is a first-class aggregate value, insert each value recursively.
  if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      // Note that getElementOffset returns a byte offset, while Offset is
      // tracked in bits, hence the scaling here.
      Old = ConvertScalar_InsertValue(Elt, Old,
                                      Offset+Layout.getElementOffset(i)*8,
                                      Builder);
    }
    return Old;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
    uint64_t EltSize = TD->getTypePaddedSizeInBits(AT->getElementType());
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
    }
    return Old;
  }
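
  // A worked example of the aggregate cases above (illustrative only): storing
  // a first-class {i16, i16} value into an alloca being converted to i32
  // extracts each field with extractvalue and recurses, inserting field 0 at
  // bit offset Offset+0 and field 1 at Offset+16.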

  // If SV is a float, convert it to the appropriate integer type.
  // If it is a pointer, do the same.
  unsigned SrcWidth = TD->getTypeSizeInBits(SV->getType());
  unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
  if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
    SV = Builder.CreateBitCast(SV, IntegerType::get(SrcWidth), "tmp");
  else if (isa<PointerType>(SV->getType()))
    SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(), "tmp");

  // Zero extend or truncate the value if needed.
  if (SV->getType() != AllocaType) {
    if (SV->getType()->getPrimitiveSizeInBits() <
        AllocaType->getPrimitiveSizeInBits())
      SV = Builder.CreateZExt(SV, AllocaType, "tmp");
    else {
      // Truncation may be needed if storing more than the alloca can hold
      // (undefined behavior).
      SV = Builder.CreateTrunc(SV, AllocaType, "tmp");
      SrcWidth = DestWidth;
      SrcStoreWidth = DestStoreWidth;
    }
  }

  // If this is a big-endian system and the store is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we allow negative shift amounts here, handling them by emitting a
  // lshr instead of a shl.  We do this to support (f.e.) stores off the end
  // of a structure where only some bits in the structure are set.
  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(), ShAmt), "tmp");
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(), -ShAmt), "tmp");
    Mask = Mask.lshr(-ShAmt);
  }
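
  // A worked example of the mask bookkeeping above (illustrative only):
  // inserting an i8 at bit Offset 16 of an i32 alloca on a little-endian
  // target gives ShAmt == 16; Mask starts as 0x000000FF and is shifted to
  // 0x00FF0000, so the code below clears exactly the byte being replaced
  // before or'ing in the shifted value.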

  // Mask out the bits we are about to insert from the old value, and or
  // in the new bits.
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = Builder.CreateAnd(Old, ConstantInt::get(~Mask), "mask");
    SV = Builder.CreateOr(Old, SV, "ins");
  }
  return SV;
}


/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, and return false if
/// we see any stores or other unknown uses.  If we see pointer arithmetic,
/// keep track of whether it moves the pointer (with isOffset), but otherwise
/// traverse the uses.  If we see a memcpy/memmove whose dest is a pointer that
/// has not been offset from the start of the alloca, and whose source is a
/// pointer to a constant global, we can optimize this.
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
      // Ignore non-volatile loads, they are always ok.
      if (!LI->isVolatile())
        continue;

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // The GEP offsets the pointer unless all of its indices are zero.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                          isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI))
      return false;

    // If we have already seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the copy instruction if the
/// specified alloca is only modified by a single copy from a constant global,
/// or null otherwise.  If we can prove this, we can replace any uses of the
/// alloca with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}
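
// An illustration of what this analysis enables (hypothetical IR, not from
// the regression tests): given
//
//   %A = alloca [4 x i32]
//   %B = bitcast [4 x i32]* %A to i8*
//   call void @llvm.memcpy.i32(i8* %B,
//                              i8* bitcast ([4 x i32]* @G to i8*),
//                              i32 16, i32 4)
//   ; ... only reads of %A below ...
//
// where @G is a constant global, the caller can replace all uses of %A with a
// pointer to @G and delete both the alloca and the memcpy.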