ScalarReplAggregates.cpp revision 1a3257bbf53eff4c7cfcbef972dd382f7baa7592
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because the
// two often interact, especially for C++ programs.  As such, iterating between
// SRoA, then Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

namespace {
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    static char ID; // Pass identification, replacement for typeid
    explicit SROA(signed T = -1) : FunctionPass(&ID) {
      if (T == -1)
        SRThreshold = 128;
      else
        SRThreshold = T;
    }

    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - We need dominator information for promotion, and we
    // know this pass will not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    TargetData *TD;

    /// AllocaInfo - When analyzing uses of an alloca instruction, this
    /// captures information about the uses.  All these fields are initialized
    /// to false and set to true when something is learned.
    struct AllocaInfo {
      /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
      bool isUnsafe : 1;

      /// needsCanon - This is set to true if there is some use of the alloca
      /// that requires canonicalization.
      bool needsCanon : 1;

      /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
      bool isMemCpySrc : 1;

      /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
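      /// If both isMemCpySrc and isMemCpyDst are set and the aggregate type
      /// contains padding, isSafeAllocaToScalarRepl refuses the transform,
      /// since the copy may be moving bytes that live in that padding.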
      bool isMemCpyDst : 1;

      AllocaInfo()
        : isUnsafe(false), needsCanon(false),
          isMemCpySrc(false), isMemCpyDst(false) {}
    };

    unsigned SRThreshold;

    void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

    int isSafeAllocaToScalarRepl(AllocationInst *AI);

    void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                               AllocaInfo &Info);
    void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                          AllocaInfo &Info);
    void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                        unsigned OpNo, AllocaInfo &Info);
    void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
                                        AllocaInfo &Info);

    void DoScalarReplacement(AllocationInst *AI,
                             std::vector<AllocationInst*> &WorkList);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                      AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI,
                                       SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);

    bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                            bool &SawVec, uint64_t Offset, unsigned AllocaSize);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
    Value *ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
                                     uint64_t Offset);
    Value *ConvertUsesOfStoreToScalar(Value *StoredVal, AllocaInst *NewAI,
                                      uint64_t Offset, Instruction *InsertPt);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
  };
}

char SROA::ID = 0;
static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}


bool SROA::runOnFunction(Function &F) {
  TD = &getAnalysis<TargetData>();

  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions
    // in the entry node.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}

/// getNumSAElements - Return the number of elements in the specified struct
/// or array.
static uint64_t getNumSAElements(const Type *T) {
  if (const StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements();
  return cast<ArrayType>(T)->getNumElements();
}

// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function,
// removing them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any allocas and mallocs to the
  // worklist.
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist.
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // If this alloca is impossible for us to promote, reject it early.
    if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
      continue;

    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global.  If this is the case, we can change all users
    // to use the constant global instead.  This is commonly produced by the
    // CFE by constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }"
    // if 'A' is only subsequently read.
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DOUT << "Found alloca equal to global: " << *AI;
      DOUT << "  memcpy = " << *TheCopy;
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    uint64_t AllocaSize = TD->getTypePaddedSize(AI->getAllocatedType());

    if ((isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType())) &&
        // Do not promote any struct whose size is too big.
        AllocaSize < SRThreshold &&
        // Do not promote any struct into more than SRThreshold/4 (by default
        // 32) separate vars.
        getNumSAElements(AI->getAllocatedType()) < SRThreshold/4) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: assert(0 && "Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CanonicalizeAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
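        // For example, "%A = alloca {i32, float}" is broken up below into
        // "%A.0 = alloca i32" and "%A.1 = alloca float", and all users of %A
        // are rewritten in terms of the element allocas.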
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // See if we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register.
    // IsNotTrivial tracks whether this is something that mem2reg could have
    // promoted itself.  If so, we don't want to transform it needlessly.  Note
    // that we can't just check based on the type: the alloca may be of an i32
    // but that has pointer arithmetic to set byte 3 of it or something.
    bool IsNotTrivial = false;
    const Type *VectorTy = 0;
    bool HadAVector = false;
    if (CanConvertToScalar(AI, IsNotTrivial, VectorTy, HadAVector,
                           0, unsigned(AllocaSize)) && IsNotTrivial) {
      AllocaInst *NewAI;
      // If we were able to find a vector type that can handle this with
      // insert/extract elements, and if there was at least one use that had
      // a vector type, promote this to a vector.  We don't want to promote
      // random stuff that doesn't use vectors (e.g. <9 x double>) because then
      // we just get a lot of insert/extracts.  If at least one vector is
      // involved, then we probably really do have a union of vector/array.
      if (VectorTy && isa<VectorType>(VectorTy) && HadAVector) {
        DOUT << "CONVERT TO VECTOR: " << *AI << "  TYPE = " << *VectorTy <<"\n";

        // Create and insert the vector alloca.
        NewAI = new AllocaInst(VectorTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      } else {
        DOUT << "CONVERT TO SCALAR INTEGER: " << *AI << "\n";

        // Create and insert the integer alloca.
        const Type *NewTy = IntegerType::get(AllocaSize*8);
        NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      }
      NewAI->takeName(AI);
      AI->eraseFromParent();
      ++NumConverted;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this alloca.
  }

  return Changed;
}

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate, do SROA now.
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DOUT << "Found inst to SROA: " << *AI;
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
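  // For example, a user of the form:
  //   %p = getelementptr {i32, float}* %A, i32 0, i32 1
  // is replaced below with a direct use of the element alloca %A.1.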
  //
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    // Replace:
    //   %res = load { i32, i32 }* %alloc
    // with:
    //   %load.0 = load i32* %alloc.0
    //   %insert.0 = insertvalue { i32, i32 } zeroinitializer, i32 %load.0, 0
    //   %load.1 = load i32* %alloc.1
    //   %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
    // (Also works for arrays instead of structs)
    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *Insert = UndefValue::get(LI->getType());
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Load = new LoadInst(ElementAllocas[i], "load", LI);
        Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);
      }
      LI->replaceAllUsesWith(Insert);
      LI->eraseFromParent();
      continue;
    }

    // Replace:
    //   store { i32, i32 } %val, { i32, i32 }* %alloc
    // with:
    //   %val.0 = extractvalue { i32, i32 } %val, 0
    //   store i32 %val.0, i32* %alloc.0
    //   %val.1 = extractvalue { i32, i32 } %val, 1
    //   store i32 %val.1, i32* %alloc.1
    // (Also works for arrays instead of structs)
    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI);
        new StoreInst(Extract, ElementAllocas[i], SI);
      }
      SI->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
       (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelementptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      //
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(),
                                           NewArgs.end(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0 && GEPI->hasAllZeroIndices())
      RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);

    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  NumReplaced++;
}


/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
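/// For example, loads of the element and stores INTO it are allowed, but a
/// "store i32* %elt, i32** %p" of the element pointer itself is not, since
/// the element address would escape.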
441/// 442void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI, 443 AllocaInfo &Info) { 444 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 445 I != E; ++I) { 446 Instruction *User = cast<Instruction>(*I); 447 switch (User->getOpcode()) { 448 case Instruction::Load: break; 449 case Instruction::Store: 450 // Store is ok if storing INTO the pointer, not storing the pointer 451 if (User->getOperand(0) == Ptr) return MarkUnsafe(Info); 452 break; 453 case Instruction::GetElementPtr: { 454 GetElementPtrInst *GEP = cast<GetElementPtrInst>(User); 455 bool AreAllZeroIndices = isFirstElt; 456 if (GEP->getNumOperands() > 1) { 457 if (!isa<ConstantInt>(GEP->getOperand(1)) || 458 !cast<ConstantInt>(GEP->getOperand(1))->isZero()) 459 // Using pointer arithmetic to navigate the array. 460 return MarkUnsafe(Info); 461 462 if (AreAllZeroIndices) 463 AreAllZeroIndices = GEP->hasAllZeroIndices(); 464 } 465 isSafeElementUse(GEP, AreAllZeroIndices, AI, Info); 466 if (Info.isUnsafe) return; 467 break; 468 } 469 case Instruction::BitCast: 470 if (isFirstElt) { 471 isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info); 472 if (Info.isUnsafe) return; 473 break; 474 } 475 DOUT << " Transformation preventing inst: " << *User; 476 return MarkUnsafe(Info); 477 case Instruction::Call: 478 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { 479 if (isFirstElt) { 480 isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info); 481 if (Info.isUnsafe) return; 482 break; 483 } 484 } 485 DOUT << " Transformation preventing inst: " << *User; 486 return MarkUnsafe(Info); 487 default: 488 DOUT << " Transformation preventing inst: " << *User; 489 return MarkUnsafe(Info); 490 } 491 } 492 return; // All users look ok :) 493} 494 495/// AllUsersAreLoads - Return true if all users of this value are loads. 496static bool AllUsersAreLoads(Value *Ptr) { 497 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 498 I != E; ++I) 499 if (cast<Instruction>(*I)->getOpcode() != Instruction::Load) 500 return false; 501 return true; 502} 503 504/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an 505/// aggregate allocation. 506/// 507void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI, 508 AllocaInfo &Info) { 509 if (BitCastInst *C = dyn_cast<BitCastInst>(User)) 510 return isSafeUseOfBitCastedAllocation(C, AI, Info); 511 512 if (LoadInst *LI = dyn_cast<LoadInst>(User)) 513 if (!LI->isVolatile()) 514 return;// Loads (returning a first class aggregrate) are always rewritable 515 516 if (StoreInst *SI = dyn_cast<StoreInst>(User)) 517 if (!SI->isVolatile() && SI->getOperand(0) != AI) 518 return;// Store is ok if storing INTO the pointer, not storing the pointer 519 520 GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User); 521 if (GEPI == 0) 522 return MarkUnsafe(Info); 523 524 gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI); 525 526 // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>". 527 if (I == E || 528 I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) { 529 return MarkUnsafe(Info); 530 } 531 532 ++I; 533 if (I == E) return MarkUnsafe(Info); // ran out of GEP indices?? 534 535 bool IsAllZeroIndices = true; 536 537 // If the first index is a non-constant index into an array, see if we can 538 // handle it as a special case. 
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    if (!isa<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices = false;
      uint64_t NumElements = AT->getNumElements();

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements
      // in it, in which case we CAN promote it, but we have to canonicalize
      // this out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI)) {
        Info.needsCanon = true;
        return;  // Canonicalization required!
      }
      return MarkUnsafe(Info);
    }
  }

  // Walk through the GEP type indices, checking the types that this indexes
  // into.
  for (; I != E; ++I) {
    // Ignore struct elements, no extra checking needed for these.
    if (isa<StructType>(*I))
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
    if (!IdxVal) return MarkUnsafe(Info);

    // Are all indices still zero?
    IsAllZeroIndices &= IdxVal->isZero();

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      // This GEP indexes an array.  Verify that this is an in-range constant
      // integer.  Specifically, consider A[0][i].  We cannot know that the
      // user isn't doing invalid things like allowing i to index an
      // out-of-range subscript that accesses A[1].  Because of this, we have
      // to reject SROA of any accesses into structs where any of the
      // components are variables.
      if (IdxVal->getZExtValue() >= AT->getNumElements())
        return MarkUnsafe(Info);
    } else if (const VectorType *VT = dyn_cast<VectorType>(*I)) {
      if (IdxVal->getZExtValue() >= VT->getNumElements())
        return MarkUnsafe(Info);
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to
  // reject them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info);
}

/// isSafeMemIntrinsicOnAllocation - Check whether the specified memory
/// intrinsic can be promoted by SROA, marking Info unsafe if not.  At this
/// point, we know that the operand of the memintrinsic is a pointer to the
/// beginning of the allocation.
void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                          unsigned OpNo, AllocaInfo &Info) {
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return MarkUnsafe(Info);

  // If not the whole aggregate, give up.
  if (Length->getZExtValue() !=
      TD->getTypePaddedSize(AI->getType()->getElementType()))
    return MarkUnsafe(Info);

  // We only know about memcpy/memset/memmove.
  if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
    return MarkUnsafe(Info);

  // Otherwise, we can transform it.  Determine whether this is a memcpy/set
  // into or out of the aggregate.
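  // For memcpy/memmove, operand 1 is the destination pointer and operand 2 is
  // the source (operand 0 is the callee); a memset pointer operand can only
  // be the destination.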
  if (OpNo == 1)
    Info.isMemCpyDst = true;
  else {
    assert(OpNo == 2);
    Info.isMemCpySrc = true;
  }
}

/// isSafeUseOfBitCastedAllocation - Check that all users of this bitcast are
/// safe to transform, marking Info unsafe if not.
void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
                                          AllocaInfo &Info) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      isSafeUseOfBitCastedAllocation(BCU, AI, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      if (SI->isVolatile())
        return MarkUnsafe(Info);

      // If storing the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and store through it.  This is similar to
      // the memcpy case and occurs in various "byval" cases and emulated
      // memcpys.
      if (isa<IntegerType>(SI->getOperand(0)->getType()) &&
          TD->getTypePaddedSize(SI->getOperand(0)->getType()) ==
          TD->getTypePaddedSize(AI->getType()->getElementType())) {
        Info.isMemCpyDst = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
      if (LI->isVolatile())
        return MarkUnsafe(Info);

      // If loading the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and load through it.  This is similar to
      // the memcpy case and occurs in various "byval" cases and emulated
      // memcpys.
      if (isa<IntegerType>(LI->getType()) &&
          TD->getTypePaddedSize(LI->getType()) ==
          TD->getTypePaddedSize(AI->getType()->getElementType())) {
        Info.isMemCpySrc = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else {
      return MarkUnsafe(Info);
    }
    if (Info.isUnsafe) return;
  }
}

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    Instruction *User = cast<Instruction>(*UI++);
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      if (BCU->use_empty()) BCU->eraseFromParent();
      continue;
    }

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      // This must be memcpy/memmove/memset of the entire aggregate.
      // Split into one per element.
      RewriteMemIntrinUserOfAlloca(MI, BCInst, AI, NewElts);
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // If this is a store of the entire alloca from an integer, rewrite it.
      RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
      continue;
    }

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // If this is a load of the entire alloca to an integer, rewrite it.
      RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
      continue;
    }

    // Otherwise it must be some other user of a gep of the first pointer.
    // Just leave these alone.
    continue;
  }
}

/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                        AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {

  // If this is a memcpy/memmove, construct the other pointer as the
  // appropriate type.
  Value *OtherPtr = 0;
  if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
    if (BCInst == MCI->getRawDest())
      OtherPtr = MCI->getRawSource();
    else {
      assert(BCInst == MCI->getRawSource());
      OtherPtr = MCI->getRawDest();
    }
  } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
    if (BCInst == MMI->getRawDest())
      OtherPtr = MMI->getRawSource();
    else {
      assert(BCInst == MMI->getRawSource());
      OtherPtr = MMI->getRawDest();
    }
  }

  // If there is another pointer, we want to convert it to the same pointer
  // type as AI has, so we can GEP through it safely.
  if (OtherPtr) {
    // It is likely that OtherPtr is a bitcast, if so, remove it.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
      OtherPtr = BC->getOperand(0);
    // All zero GEPs are effectively bitcasts.
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr))
      if (GEP->hasAllZeroIndices())
        OtherPtr = GEP->getOperand(0);

    if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
      if (BCE->getOpcode() == Instruction::BitCast)
        OtherPtr = BCE->getOperand(0);

    // If the pointer is not the right type, insert a bitcast to the right
    // type.
    if (OtherPtr->getType() != AI->getType())
      OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                 MI);
  }

  // Process each element of the aggregate.
  Value *TheFn = MI->getOperand(0);
  const Type *BytePtrTy = MI->getRawDest()->getType();
  bool SROADest = MI->getRawDest() == BCInst;

  Constant *Zero = Constant::getNullValue(Type::Int32Ty);

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // If this is a memcpy/memmove, emit a GEP of the other element address.
    Value *OtherElt = 0;
    if (OtherPtr) {
      Value *Idx[2] = { Zero, ConstantInt::get(Type::Int32Ty, i) };
      OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
                                           OtherPtr->getNameStr()+"."+utostr(i),
                                           MI);
    }

    Value *EltPtr = NewElts[i];
    const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

    // If we got down to a scalar, insert a load or store as appropriate.
    if (EltTy->isSingleValueType()) {
      if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
        Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp",
                                  MI);
        new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
        continue;
      }
      assert(isa<MemSetInst>(MI));

      // If the stored element is zero (common case), just store a null
      // constant.
      Constant *StoreVal;
      if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
        if (CI->isZero()) {
          StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
        } else {
          // If EltTy is a vector type, get the element type.
          const Type *ValTy = EltTy;
          if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
            ValTy = VTy->getElementType();

          // Construct an integer with the right value.
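          // e.g. a "memset(p, 1, n)" on an i32 element stores the constant
          // 0x01010101: the byte value is splatted into every byte position.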
          unsigned EltSize = TD->getTypeSizeInBits(ValTy);
          APInt OneVal(EltSize, CI->getZExtValue());
          APInt TotalVal(OneVal);
          // Set each byte.
          for (unsigned i = 0; 8*i < EltSize; ++i) {
            TotalVal = TotalVal.shl(8);
            TotalVal |= OneVal;
          }

          // Convert the integer value to the appropriate type.
          StoreVal = ConstantInt::get(TotalVal);
          if (isa<PointerType>(ValTy))
            StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
          else if (ValTy->isFloatingPoint())
            StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
          assert(StoreVal->getType() == ValTy && "Type mismatch!");

          // If the requested value was a vector constant, create it.
          if (EltTy != ValTy) {
            unsigned NumElts = cast<VectorType>(ValTy)->getNumElements();
            SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
            StoreVal = ConstantVector::get(&Elts[0], NumElts);
          }
        }
        new StoreInst(StoreVal, EltPtr, MI);
        continue;
      }
      // Otherwise, if we're storing a byte variable, use a memset call for
      // this element.
    }

    // Cast the element pointer to BytePtrTy.
    if (EltPtr->getType() != BytePtrTy)
      EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

    // Cast the other pointer (if we have one) to BytePtrTy.
    if (OtherElt && OtherElt->getType() != BytePtrTy)
      OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                 MI);

    unsigned EltSize = TD->getTypePaddedSize(EltTy);

    // Finally, insert the meminst for this element.
    if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
      Value *Ops[] = {
        SROADest ? EltPtr : OtherElt,  // Dest ptr
        SROADest ? OtherElt : EltPtr,  // Src ptr
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
        Zero  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    } else {
      assert(isa<MemSetInst>(MI));
      Value *Ops[] = {
        EltPtr, MI->getOperand(2),  // Dest, Value,
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
        Zero  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    }
  }
  MI->eraseFromParent();
}

/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation.  Extract out the pieces of the stored
/// integer and store them individually.
void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
                                         AllocationInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts){
  // Extract each element out of the integer according to its structure offset
  // and store the element value to the individual alloca.
  Value *SrcVal = SI->getOperand(0);
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);

  // If this isn't a store of an integer to the whole alloca, it may be a
  // store to the first element.  Just ignore the store in this case and
  // normal SROA will handle it.
  if (!isa<IntegerType>(SrcVal->getType()) ||
      TD->getTypePaddedSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    return;

  DOUT << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << *SI;

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
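  // For example, on a little-endian target, storing i64 %V over {i32, i32}
  // stores trunc(%V) to field 0 and trunc(%V lshr 32) to field 1.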
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    const StructLayout *Layout = TD->getStructLayout(EltSTy);

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Get the number of bits to shift SrcVal to get the value.
      const Type *FieldTy = EltSTy->getElementType(i);
      uint64_t Shift = Layout->getElementOffsetInBits(i);

      if (TD->isBigEndian())
        Shift = AllocaSizeBits-Shift-TD->getTypePaddedSizeInBits(FieldTy);

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

      // Ignore zero sized fields like {}, they obviously contain no data.
      if (FieldSizeBits == 0) continue;

      if (FieldSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal, IntegerType::get(FieldSizeBits), "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == FieldTy) {
        // Storing to an integer field of this size, just do it.
      } else if (FieldTy->isFloatingPoint() || isa<VectorType>(FieldTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);
    }

  } else {
    const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
    const Type *ArrayEltTy = ATy->getElementType();
    uint64_t ElementOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
    uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);

    uint64_t Shift;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-ElementOffset;
    else
      Shift = 0;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Ignore zero sized fields like {}, they obviously contain no data.
      if (ElementSizeBits == 0) continue;

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      if (ElementSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal, IntegerType::get(ElementSizeBits),"",SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == ArrayEltTy) {
        // Storing to an integer field of this size, just do it.
      } else if (ArrayEltTy->isFloatingPoint() || isa<VectorType>(ArrayEltTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);

      if (TD->isBigEndian())
        Shift -= ElementOffset;
      else
        Shift += ElementOffset;
    }
  }

  SI->eraseFromParent();
}

/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer.  Load the individual pieces to form the aggregate value.
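/// For example, on a little-endian target, "%V = load i64* %bc" over a
/// {i32, i32} alloca becomes two i32 element loads, zero extended to i64,
/// with the second shifted left by 32 and or'd onto the first.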
void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // Extract each element out of the NewElts according to its structure offset
  // and form the result value.
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);

  // If this isn't a load of the whole alloca to an integer, it may be a load
  // of the first element.  Just ignore the load in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(LI->getType()) ||
      TD->getTypePaddedSizeInBits(LI->getType()) != AllocaSizeBits)
    return;

  DOUT << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << *LI;

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  const StructLayout *Layout = 0;
  uint64_t ArrayEltBitOffset = 0;
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    Layout = TD->getStructLayout(EltSTy);
  } else {
    const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
    ArrayEltBitOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
  }

  Value *ResultVal = Constant::getNullValue(LI->getType());

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // Load the value from the alloca.  If the NewElt is an aggregate, cast
    // the pointer to an integer of the same size before doing the load.
    Value *SrcField = NewElts[i];
    const Type *FieldTy =
      cast<PointerType>(SrcField->getType())->getElementType();
    uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

    // Ignore zero sized fields like {}, they obviously contain no data.
    if (FieldSizeBits == 0) continue;

    const IntegerType *FieldIntTy = IntegerType::get(FieldSizeBits);
    if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
        !isa<VectorType>(FieldTy))
      SrcField = new BitCastInst(SrcField, PointerType::getUnqual(FieldIntTy),
                                 "", LI);
    SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

    // If SrcField is a fp or vector of the right size but that isn't an
    // integer type, bitcast to an integer so we can shift it.
    if (SrcField->getType() != FieldIntTy)
      SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);

    // Zero extend the field to be the same size as the final alloca so that
    // we can shift and insert it.
    if (SrcField->getType() != ResultVal->getType())
      SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);

    // Determine the number of bits to shift SrcField.
    uint64_t Shift;
    if (Layout)  // Struct case.
      Shift = Layout->getElementOffsetInBits(i);
    else  // Array case.
      Shift = i*ArrayEltBitOffset;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();

    if (Shift) {
      Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
      SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
    }

    ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
  }

  LI->replaceAllUsesWith(ResultVal);
  LI->eraseFromParent();
}


/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
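/// For example, {i8, i32} has 24 bits of padding between its fields on any
/// target that aligns i32 to 4 bytes, and {i32, i8} has tail padding there.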
static bool HasPadding(const Type *Ty, const TargetData &TD) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    unsigned PrevFieldBitOffset = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

      // Padding in sub-elements?
      if (HasPadding(STy->getElementType(i), TD))
        return true;

      // Check to see if there is any padding between this element and the
      // previous one.
      if (i) {
        unsigned PrevFieldEnd =
          PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
        if (PrevFieldEnd < FieldBitOffset)
          return true;
      }

      PrevFieldBitOffset = FieldBitOffset;
    }

    // Check for tail padding.
    if (unsigned EltCount = STy->getNumElements()) {
      unsigned PrevFieldEnd = PrevFieldBitOffset +
        TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
      if (PrevFieldEnd < SL->getSizeInBits())
        return true;
    }

  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    return HasPadding(ATy->getElementType(), TD);
  } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    return HasPadding(VTy->getElementType(), TD);
  }
  return TD.getTypeSizeInBits(Ty) != TD.getTypePaddedSizeInBits(Ty);
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all
  // of the users are safe to transform.
  AllocaInfo Info;

  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info);
    if (Info.isUnsafe) {
      DOUT << "Cannot transform: " << *AI << "  due to user: " << **I;
      return 0;
    }
  }

  // Okay, we know all the users are promotable.  If the aggregate is a memcpy
  // source and destination, we have to be careful.  In particular, the memcpy
  // could be moving around elements that live in structure padding of the
  // LLVM types, but may actually be used.  In these cases, we refuse to
  // promote the struct.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getType()->getElementType(), *TD))
    return 0;

  // If we require cleanup, return 1, otherwise return 3.
  return Info.needsCanon ? 1 : 3;
}

/// CanonicalizeAllocaUsers - If SROA reported that it can promote the
/// specified allocation, but only if cleaned up, perform the cleanups
/// required.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted,
  // so we can insert ugly code if required so long as sroa+mem2reg will clean
  // it up.
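  // For example, the only problematic use may be:
  //   %p = getelementptr [2 x i32]* %A, i32 0, i32 %i
  //   %v = load i32* %p
  // which is rewritten below into loads of both elements plus a select on %i.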
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI++);
    if (!GEPI) continue;
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP,
          // insert two loads of the appropriate indexed GEP and select
          // between them.
          Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                              "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::Int32Ty);
          Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                                     Indices.begin(),
                                                     Indices.end(),
                                                     GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
          Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                                    Indices.begin(),
                                                    Indices.end(),
                                                    GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}

/// MergeInType - Add the 'In' type to the accumulated type (Accum) so far at
/// the offset specified by Offset (which is specified in bytes).
///
/// There are two cases we handle here:
///   1) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///      This promotes a <4 x float> with a store of float to the third element
///      into a <4 x float> that uses insert element.
///   2) A fully general blob of memory, which we turn into some (potentially
///      large) integer type with extract and insert operations where the loads
///      and stores would mutate the memory.
static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
                        unsigned AllocaSize, const TargetData &TD) {
  // If this could be contributing to a vector, analyze it.
  if (VecTy != Type::VoidTy) { // either null or a vector type.

    // If the In type is a vector that is the same size as the alloca, see if
    // it matches the existing VecTy.
    if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
      if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
        // If we're storing/loading a vector of the right size, allow it as a
        // vector.  If this is the first vector we see, remember the type so
        // that we know the element size.
        if (VecTy == 0)
          VecTy = VInTy;
        return;
      }
    } else if (In == Type::FloatTy || In == Type::DoubleTy ||
               (isa<IntegerType>(In) && In->getPrimitiveSizeInBits() >= 8 &&
                isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
      // If we're accessing something that could be an element of a vector,
      // see if the implied vector agrees with what we already have and if
      // Offset is compatible with it.
      unsigned EltSize = In->getPrimitiveSizeInBits()/8;
      if (Offset % EltSize == 0 &&
          AllocaSize % EltSize == 0 &&
          (VecTy == 0 ||
           cast<VectorType>(VecTy)->getElementType()
                 ->getPrimitiveSizeInBits()/8 == EltSize)) {
        if (VecTy == 0)
          VecTy = VectorType::get(In, AllocaSize/EltSize);
        return;
      }
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized vector
  // form.  We can still turn this into a large integer.
  VecTy = Type::VoidTy;
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee and
/// all its accesses to use a single vector type, return true, and set VecTy
/// to the new type.  If we could convert the alloca into a single promotable
/// integer, return true but set VecTy to VoidTy.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.
/// Offset is the current offset from the base of the alloca being analyzed.
///
/// If we see at least one access to the value that is a vector type, set the
/// SawVec flag.
///
bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                              bool &SawVec, uint64_t Offset,
                              unsigned AllocaSize) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // Don't break volatile loads.
      if (LI->isVolatile())
        return false;
      MergeInType(LI->getType(), Offset, VecTy, AllocaSize, *TD);
      SawVec |= isa<VectorType>(LI->getType());
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V || SI->isVolatile()) return false;
      MergeInType(SI->getOperand(0)->getType(), Offset, VecTy, AllocaSize, *TD);
      SawVec |= isa<VectorType>(SI->getOperand(0)->getType());
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (!CanConvertToScalar(BCI, IsNotTrivial, VecTy, SawVec, Offset,
                              AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // If this is a GEP with variable indices, we can't handle it.
      if (!GEP->hasAllConstantIndices())
        return false;

      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      // See if all uses can be converted.
      if (!CanConvertToScalar(GEP, IsNotTrivial, VecTy, SawVec,
                              Offset+GEPOffset, AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // handle it.
    if (isa<MemSetInst>(User) &&
        // Store of constant value.
        isa<ConstantInt>(User->getOperand(2)) &&
        // Store with constant size.
        isa<ConstantInt>(User->getOperand(3))) {
      VecTy = Type::VoidTy;
      IsNotTrivial = true;
      continue;
    }

    // Otherwise, we cannot handle this!
    return false;
  }

  return true;
}


/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new
/// alloca directly.  This happens when we are converting an "integer union"
/// to a single integer scalar, or when we are converting a "vector union" to
/// a vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      LI->replaceAllUsesWith(ConvertUsesOfLoadToScalar(LI, NewAI, Offset));
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      new StoreInst(ConvertUsesOfStoreToScalar(SI->getOperand(0), NewAI,
                                               Offset, SI), NewAI, SI);
      SI->eraseFromParent();
      continue;
    }

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
      GEP->eraseFromParent();
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // transform it into a store of the expanded constant value.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
      unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

      // Compute the value replicated the right number of times.
      APInt APVal(NumBytes*8, Val);

      // Splat the value if non-zero.
      if (Val)
        for (unsigned i = 1; i != NumBytes; ++i)
          APVal |= APVal << 8;

      new StoreInst(ConvertUsesOfStoreToScalar(ConstantInt::get(APVal), NewAI,
                                               Offset, MSI), NewAI, MSI);
      MSI->eraseFromParent();
      continue;
    }


    assert(0 && "Unsupported operation!");
    abort();
  }
}

/// ConvertUsesOfLoadToScalar - Convert all of the users of the specified load
/// to use the new alloca directly, returning the value that should replace
/// the load.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
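/// For example, a load of the second float of an alloca converted to
/// <4 x float> becomes an extractelement at index 1, while a load of an i16
/// at bit offset 16 of an alloca converted to i32 becomes a lshr by 16
/// followed by a trunc (on a little-endian target).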
Value *SROA::ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
                                       uint64_t Offset) {
  // The load is a bit extract from NewAI shifted right by Offset bits.
  Value *NV = new LoadInst(NewAI, LI->getName(), LI);

  // If the load is of the whole new alloca, no conversion is needed.
  if (NV->getType() == LI->getType() && Offset == 0)
    return NV;

  // If the result alloca is a vector type, this is either an element
  // access or a bitcast to another vector type of the same size.
  if (const VectorType *VTy = dyn_cast<VectorType>(NV->getType())) {
    if (isa<VectorType>(LI->getType()))
      return new BitCastInst(NV, LI->getType(), LI->getName(), LI);

    // Otherwise it must be an element access.
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = TD->getTypePaddedSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
    }
    // Return the element extracted out of it.
    Value *V = new ExtractElementInst(NV, ConstantInt::get(Type::Int32Ty, Elt),
                                      "tmp", LI);
    if (V->getType() != LI->getType())
      V = new BitCastInst(V, LI->getType(), "tmp", LI);
    return V;
  }

  // Otherwise, this must be a union that was converted to an integer value.
  const IntegerType *NTy = cast<IntegerType>(NV->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = TD->getTypeStoreSizeInBits(NTy) -
            TD->getTypeStoreSizeInBits(LI->getType()) - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative shift amounts (with shl), which are not defined
  // for the shift instructions themselves.  We do this to support (f.e.)
  // loads off the end of a structure where only some bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    NV = BinaryOperator::CreateLShr(NV,
                                    ConstantInt::get(NV->getType(), ShAmt),
                                    LI->getName(), LI);
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    NV = BinaryOperator::CreateShl(NV,
                                   ConstantInt::get(NV->getType(), -ShAmt),
                                   LI->getName(), LI);

  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = TD->getTypeSizeInBits(LI->getType());
  if (LIBitWidth < NTy->getBitWidth())
    NV = new TruncInst(NV, IntegerType::get(LIBitWidth), LI->getName(), LI);
  else if (LIBitWidth > NTy->getBitWidth())
    NV = new ZExtInst(NV, IntegerType::get(LIBitWidth), LI->getName(), LI);

  // If the result is an integer, this is a trunc or bitcast.
  if (isa<IntegerType>(LI->getType())) {
    // Should be done.
  } else if (LI->getType()->isFloatingPoint() ||
             isa<VectorType>(LI->getType())) {
    // Just do a bitcast, we know the sizes match up.
    NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
  } else {
    // Otherwise must be a pointer.
    NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI);
  }
  assert(NV->getType() == LI->getType() && "Didn't convert right?");
  return NV;
}


/// ConvertUsesOfStoreToScalar - Convert the specified store to a load+store
/// pair of the new alloca directly, returning the value that should be stored
/// to the alloca.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
Value *SROA::ConvertUsesOfStoreToScalar(Value *SV, AllocaInst *NewAI,
                                        uint64_t Offset, Instruction *IP) {

  // Convert the stored value to the alloca's actual type, shift it left into
  // position, then 'or' it into place.
  const Type *AllocaType = NewAI->getType()->getElementType();
  if (SV->getType() == AllocaType && Offset == 0)
    return SV;

  if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", IP);

    // If the result alloca is a vector type, this is either an element
    // access or a bitcast to another vector type.
    if (isa<VectorType>(SV->getType())) {
      SV = new BitCastInst(SV, AllocaType, SV->getName(), IP);
    } else {
      // Must be an element insertion.
      unsigned Elt = Offset/TD->getTypePaddedSizeInBits(VTy->getElementType());

      if (SV->getType() != VTy->getElementType())
        SV = new BitCastInst(SV, VTy->getElementType(), "tmp", IP);

      SV = InsertElementInst::Create(Old, SV,
                                     ConstantInt::get(Type::Int32Ty, Elt),
                                     "tmp", IP);
    }
    return SV;
  }

  Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", IP);

  // If SV is a float, convert it to the appropriate integer type.
  // If it is a pointer, do the same, and also handle ptr->ptr casts
  // here.
  unsigned SrcWidth = TD->getTypeSizeInBits(SV->getType());
  unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
  if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
    SV = new BitCastInst(SV, IntegerType::get(SrcWidth), SV->getName(), IP);
  else if (isa<PointerType>(SV->getType()))
    SV = new PtrToIntInst(SV, TD->getIntPtrType(), SV->getName(), IP);

  // Zero extend or truncate the value if needed.
  if (SV->getType() != AllocaType) {
    if (SV->getType()->getPrimitiveSizeInBits() <
        AllocaType->getPrimitiveSizeInBits())
      SV = new ZExtInst(SV, AllocaType, SV->getName(), IP);
    else {
      // Truncation may be needed if storing more than the alloca can hold
      // (undefined behavior).
      SV = new TruncInst(SV, AllocaType, SV->getName(), IP);
      SrcWidth = DestWidth;
      SrcStoreWidth = DestStoreWidth;
    }
  }

  // If this is a big-endian system and the store is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
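    // Worked example (illustrative): storing an i8 at bit offset 0 into an
    // i32 alloca on a big-endian target gives ShAmt = 32 - 8 - 0 = 24, so
    // the byte is shifted up into the most-significant position below.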
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: the shift amount may be negative; we handle that case with a lshr
  // instead, since a left shift by a negative amount is not defined.  We do
  // this to support (f.e.) stores off the end of a structure where only some
  // bits in the structure are set.
  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = BinaryOperator::CreateShl(SV,
                                   ConstantInt::get(SV->getType(), ShAmt),
                                   SV->getName(), IP);
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    SV = BinaryOperator::CreateLShr(SV,
                                    ConstantInt::get(SV->getType(), -ShAmt),
                                    SV->getName(), IP);
    Mask = Mask.lshr(-ShAmt);
  }

  // Mask out the bits we are about to insert from the old value, and or
  // in the new bits.
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = BinaryOperator::CreateAnd(Old, ConstantInt::get(~Mask),
                                    Old->getName()+".mask", IP);
    SV = BinaryOperator::CreateOr(Old, SV, SV->getName()+".ins", IP);
  }
  return SV;
}


/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset) but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an unoffset
/// pointer to the alloca, and if the source pointer is a pointer to a
/// constant global, we can optimize this.
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
      // Ignore non-volatile loads, they are always ok.
      if (!LI->isVolatile())
        continue;

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // A GEP with all zero indices doesn't offset the pointer; any other
      // GEP does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                       isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI))
      return false;

    // If we have already seen a copy, reject the second one.
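    // (The caller rewrites the alloca in terms of the single copy's source
    // global, so a second copy could not be folded.)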
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the copy instruction if the
/// specified alloca is only modified by a single copy from a constant global,
/// or null otherwise.  If we can prove this, we can replace any uses of the
/// alloca with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}
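// For illustration only (made-up IR): the pattern recognized above looks
// like
//   %A = alloca %T
//   %A.i8 = bitcast %T* %A to i8*
//   call void @llvm.memcpy.i32(i8* %A.i8,
//                              i8* bitcast (%T* @G to i8*), i32 16, i32 4)
// where @G is a constant global and every other use of %A is a read; the
// caller can then rewrite those reads to use @G directly.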