ScalarReplAggregates.cpp revision 39c88a641b6bf9cea7d270ccee85992f9c30f40f
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// they often interact, especially for C++ programs.  As such, iterating
// between SRoA, then Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//

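// For example (an illustrative sketch of the combined effect; the names are
// invented for exposition), SRoA followed by Mem2Reg rewrites:
//
//   %p = alloca { i32, i32 }
//   %f0 = getelementptr { i32, i32 }* %p, i32 0, i32 0
//   store i32 1, i32* %f0
//   %v = load i32* %f0
//
// first into two scalar i32 allocas (%p.0 and %p.1), after which Mem2Reg
// promotes those to SSA values and the memory traffic disappears entirely.
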
#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

namespace {
  struct SROA : public FunctionPass {
    static char ID; // Pass identification, replacement for typeid
    explicit SROA(signed T = -1) : FunctionPass(&ID) {
      if (T == -1)
        SRThreshold = 128;
      else
        SRThreshold = T;
    }

    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass needs dominator information for promotion,
    // and we know it will not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.setPreservesCFG();
    }

  private:
    TargetData *TD;

    /// AllocaInfo - When analyzing uses of an alloca instruction, this
    /// captures information about the uses.  All these fields are initialized
    /// to false and set to true when something is learned.
    struct AllocaInfo {
      /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
      bool isUnsafe : 1;

      /// needsCleanup - This is set to true if there is some use of the alloca
      /// that requires cleanup.
      bool needsCleanup : 1;

      /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
      bool isMemCpySrc : 1;

      /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
      bool isMemCpyDst : 1;

      AllocaInfo()
        : isUnsafe(false), needsCleanup(false),
          isMemCpySrc(false), isMemCpyDst(false) {}
    };

    unsigned SRThreshold;

    void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

    int isSafeAllocaToScalarRepl(AllocaInst *AI);

    void isSafeUseOfAllocation(Instruction *User, AllocaInst *AI,
                               AllocaInfo &Info);
    void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocaInst *AI,
                          AllocaInfo &Info);
    void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocaInst *AI,
                                        unsigned OpNo, AllocaInfo &Info);
    void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocaInst *AI,
                                        AllocaInfo &Info);

    void DoScalarReplacement(AllocaInst *AI,
                             std::vector<AllocaInst*> &WorkList);
    void CleanupGEP(GetElementPtrInst *GEP);
    void CleanupAllocaUsers(AllocaInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocaInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocaInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                      AllocaInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                       SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);

    bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                            bool &SawVec, uint64_t Offset, unsigned AllocaSize);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
    Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
                                      uint64_t Offset, IRBuilder<> &Builder);
    Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
                                     uint64_t Offset, IRBuilder<> &Builder);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocaInst *AI);
  };
}

char SROA::ID = 0;
static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}


bool SROA::runOnFunction(Function &F) {
  TD = getAnalysisIfAvailable<TargetData>();

  bool Changed = performPromotion(F);

  // FIXME: ScalarRepl currently depends on TargetData more than it
  // theoretically needs to.  It should be refactored in order to support
  // target-independent IR.  Until this is done, just skip the actual
  // scalar-replacement portion of this pass.
  if (!TD) return Changed;

  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}

/// getNumSAElements - Return the number of elements in the specified struct or
/// array.
static uint64_t getNumSAElements(const Type *T) {
  if (const StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements();
  return cast<ArrayType>(T)->getNumElements();
}

// performScalarRepl - This is a simple worklist driven algorithm, which runs
// on all of the alloca instructions in the function, removing them if they
// are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocaInst*> WorkList;

  // Scan the entry basic block, adding any allocas to the worklist.
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocaInst *A = dyn_cast<AllocaInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocaInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // If this alloca is impossible for us to promote, reject it early.
    if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
      continue;

    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global.  If this is the case, we can change all users to
    // use the constant global instead.  This is commonly produced by the CFE
    // by constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if
    // 'A' is only subsequently read.
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DEBUG(errs() << "Found alloca equal to global: " << *AI << '\n');
      DEBUG(errs() << "  memcpy = " << *TheCopy << '\n');
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());

    // Do not promote [0 x %struct].
    if (AllocaSize == 0) continue;

    // Do not promote any struct whose size is too big.
    if (AllocaSize > SRThreshold) continue;

    if ((isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType())) &&
        // Do not promote any struct into more than SRThreshold/4 (32 by
        // default) separate vars.
        getNumSAElements(AI->getAllocatedType()) <= SRThreshold/4) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: llvm_unreachable("Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CleanupAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // If we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register value, do so.
    // IsNotTrivial tracks whether this is something that mem2reg could have
    // promoted itself.  If so, we don't want to transform it needlessly.  Note
    // that we can't just check based on the type: the alloca may be of an i32
    // but that has pointer arithmetic to set byte 3 of it or something.
    bool IsNotTrivial = false;
    const Type *VectorTy = 0;
    bool HadAVector = false;
    if (CanConvertToScalar(AI, IsNotTrivial, VectorTy, HadAVector,
                           0, unsigned(AllocaSize)) && IsNotTrivial) {
      AllocaInst *NewAI;
      // If we were able to find a vector type that can handle this with
      // insert/extract elements, and if there was at least one use that had
      // a vector type, promote this to a vector.  We don't want to promote
      // random stuff that doesn't use vectors (e.g. <9 x double>) because then
      // we just get a lot of insert/extracts.  If at least one vector is
      // involved, then we probably really do have a union of vector/array.
      if (VectorTy && isa<VectorType>(VectorTy) && HadAVector) {
        DEBUG(errs() << "CONVERT TO VECTOR: " << *AI << "\n  TYPE = "
                     << *VectorTy << '\n');

        // Create and insert the vector alloca.
        NewAI = new AllocaInst(VectorTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      } else {
        DEBUG(errs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");

        // Create and insert the integer alloca.
        const Type *NewTy = IntegerType::get(AI->getContext(), AllocaSize*8);
        NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      }
      NewAI->takeName(AI);
      AI->eraseFromParent();
      ++NumConverted;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this alloca.
  }

  return Changed;
}
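
// As an illustrative sketch of the vector conversion above (invented IR for
// exposition): an alloca such as
//
//   %u = alloca { float, float }        ; used as a union with <2 x float>
//
// where every access is either a whole-size <2 x float> load/store or a
// float load/store at a vector-element offset can be rebuilt as
//
//   %u = alloca <2 x float>
//
// with the element accesses lowered to insertelement/extractelement by
// ConvertUsesToScalar.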

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate; do SROA now.
void SROA::DoScalarReplacement(AllocaInst *AI,
                               std::vector<AllocaInst*> &WorkList) {
  DEBUG(errs() << "Found inst to SROA: " << *AI << '\n');
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + Twine(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + Twine(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    // Replace:
    //   %res = load { i32, i32 }* %alloc
    // with:
    //   %load.0 = load i32* %alloc.0
    //   %insert.0 = insertvalue { i32, i32 } zeroinitializer, i32 %load.0, 0
    //   %load.1 = load i32* %alloc.1
    //   %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
    // (Also works for arrays instead of structs)
    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *Insert = UndefValue::get(LI->getType());
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Load = new LoadInst(ElementAllocas[i], "load", LI);
        Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);
      }
      LI->replaceAllUsesWith(Insert);
      LI->eraseFromParent();
      continue;
    }

    // Replace:
    //   store { i32, i32 } %val, { i32, i32 }* %alloc
    // with:
    //   %val.0 = extractvalue { i32, i32 } %val, 0
    //   store i32 %val.0, i32* %alloc.0
    //   %val.1 = extractvalue { i32, i32 } %val, 1
    //   store i32 %val.1, i32* %alloc.1
    // (Also works for arrays instead of structs)
    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI);
        new StoreInst(Extract, ElementAllocas[i], SI);
      }
      SI->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
      (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelementptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      //
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(
                                          Type::getInt32Ty(AI->getContext())));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(),
                                           NewArgs.end(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0 && GEPI->hasAllZeroIndices())
      RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);

    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  NumReplaced++;
}
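
// For illustration (invented IR): after replacing %p = alloca { i32, [4 x i8] }
// with element allocas %p.0 and %p.1, a deep-indexing user such as
//
//   %a = getelementptr { i32, [4 x i8] }* %p, i32 0, i32 1, i32 2
//
// is rewritten to index the element alloca directly:
//
//   %a = getelementptr [4 x i8]* %p.1, i32 0, i32 2
//
// and is revisited when the worklist is rerun.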

/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocaInst *AI,
                            AllocaInfo &Info) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return MarkUnsafe(Info);
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1 &&
          (!isa<ConstantInt>(GEP->getOperand(1)) ||
           !cast<ConstantInt>(GEP->getOperand(1))->isZero()))
        // Using pointer arithmetic to navigate the array.
        return MarkUnsafe(Info);

      // Verify that any array subscripts are in range.
      for (gep_type_iterator GEPIt = gep_type_begin(GEP),
           E = gep_type_end(GEP); GEPIt != E; ++GEPIt) {
        // Ignore struct elements, no extra checking needed for these.
        if (isa<StructType>(*GEPIt))
          continue;

        // This GEP indexes an array.  Verify that this is an in-range constant
        // integer.  Specifically, consider A[0][i].  We cannot know that the
        // user isn't doing invalid things like allowing i to index an
        // out-of-range subscript that accesses A[1].  Because of this, we have
        // to reject SROA of any accesses into structs where any of the
        // components are variables.
        ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand());
        if (!IdxVal) return MarkUnsafe(Info);

        // Are all indices still zero?
        AreAllZeroIndices &= IdxVal->isZero();

        if (const ArrayType *AT = dyn_cast<ArrayType>(*GEPIt)) {
          if (IdxVal->getZExtValue() >= AT->getNumElements())
            return MarkUnsafe(Info);
        } else if (const VectorType *VT = dyn_cast<VectorType>(*GEPIt)) {
          if (IdxVal->getZExtValue() >= VT->getNumElements())
            return MarkUnsafe(Info);
        }
      }

      isSafeElementUse(GEP, AreAllZeroIndices, AI, Info);
      if (Info.isUnsafe) return;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt) {
        isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info);
        if (Info.isUnsafe) return;
        break;
      }
      DEBUG(errs() << "  Transformation preventing inst: " << *User << '\n');
      return MarkUnsafe(Info);
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt) {
          isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info);
          if (Info.isUnsafe) return;
          break;
        }
      }
      DEBUG(errs() << "  Transformation preventing inst: " << *User << '\n');
      return MarkUnsafe(Info);
    default:
      DEBUG(errs() << "  Transformation preventing inst: " << *User << '\n');
      return MarkUnsafe(Info);
    }
  }
  return;  // All users look ok :)
}

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}

/// isSafeUseOfAllocation - Check if this user is an allowed use for an
/// aggregate allocation.
void SROA::isSafeUseOfAllocation(Instruction *User, AllocaInst *AI,
                                 AllocaInfo &Info) {
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI, Info);

  if (LoadInst *LI = dyn_cast<LoadInst>(User))
    if (!LI->isVolatile())
      return;  // Loads (returning a first class aggregate) are always rewritable

  if (StoreInst *SI = dyn_cast<StoreInst>(User))
    if (!SI->isVolatile() && SI->getOperand(0) != AI)
      return;  // Store is ok if storing INTO the pointer, not storing the pointer

  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User);
  if (GEPI == 0)
    return MarkUnsafe(Info);

  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) {
    return MarkUnsafe(Info);
  }

  ++I;
  if (I == E) return MarkUnsafe(Info);  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If the first index is a non-constant index into an array, see if we can
  // handle it as a special case.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    if (!isa<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices = false;
      uint64_t NumElements = AT->getNumElements();

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements in
      // it, in which case we CAN promote it, but we have to canonicalize this
      // out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI)) {
        Info.needsCleanup = true;
        return;  // Canonicalization required!
      }
      return MarkUnsafe(Info);
    }
  }

  // Walk through the GEP type indices, checking the types that this indexes
  // into.
  for (; I != E; ++I) {
    // Ignore struct elements, no extra checking needed for these.
    if (isa<StructType>(*I))
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
    if (!IdxVal) return MarkUnsafe(Info);

    // Are all indices still zero?
    IsAllZeroIndices &= IdxVal->isZero();

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      // This GEP indexes an array.  Verify that this is an in-range constant
      // integer.  Specifically, consider A[0][i].  We cannot know that the
      // user isn't doing invalid things like allowing i to index an
      // out-of-range subscript that accesses A[1].  Because of this, we have
      // to reject SROA of any accesses into structs where any of the
      // components are variables.
      if (IdxVal->getZExtValue() >= AT->getNumElements())
        return MarkUnsafe(Info);
    } else if (const VectorType *VT = dyn_cast<VectorType>(*I)) {
      if (IdxVal->getZExtValue() >= VT->getNumElements())
        return MarkUnsafe(Info);
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to
  // reject them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info);
}
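
// For illustration (invented IR): with %A = alloca [2 x i32], the variable
// indexed GEP
//
//   %p = getelementptr [2 x i32]* %A, i32 0, i32 %i
//
// blocks direct decomposition, but when every user of %p is a load the alloca
// is only marked needsCleanup; CleanupGEP later rewrites the loads as two
// fixed-index loads plus a select on %i (see CleanupGEP below).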

/// isSafeMemIntrinsicOnAllocation - Check if the specified memory
/// intrinsic can be promoted by SROA.  At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocaInst *AI,
                                          unsigned OpNo, AllocaInfo &Info) {
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return MarkUnsafe(Info);

  // If not the whole aggregate, give up.
  if (Length->getZExtValue() !=
      TD->getTypeAllocSize(AI->getType()->getElementType()))
    return MarkUnsafe(Info);

  // We only know about memcpy/memset/memmove.
  if (!isa<MemIntrinsic>(MI))
    return MarkUnsafe(Info);

  // Otherwise, we can transform it.  Determine whether this is a memcpy/set
  // into or out of the aggregate.
  if (OpNo == 1)
    Info.isMemCpyDst = true;
  else {
    assert(OpNo == 2);
    Info.isMemCpySrc = true;
  }
}

/// isSafeUseOfBitCastedAllocation - Check if all users of this bitcast
/// from an alloca are safe for SROA of that alloca.
void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocaInst *AI,
                                          AllocaInfo &Info) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      isSafeUseOfBitCastedAllocation(BCU, AI, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      if (SI->isVolatile())
        return MarkUnsafe(Info);

      // If storing the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and store through it.  This is similar to
      // the memcpy case and occurs in various "byval" cases and emulated
      // memcpys.
      if (isa<IntegerType>(SI->getOperand(0)->getType()) &&
          TD->getTypeAllocSize(SI->getOperand(0)->getType()) ==
          TD->getTypeAllocSize(AI->getType()->getElementType())) {
        Info.isMemCpyDst = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
      if (LI->isVolatile())
        return MarkUnsafe(Info);

      // If loading the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and load through it.  This is similar to the
      // memcpy case and occurs in various "byval" cases and emulated memcpys.
      if (isa<IntegerType>(LI->getType()) &&
          TD->getTypeAllocSize(LI->getType()) ==
          TD->getTypeAllocSize(AI->getType()->getElementType())) {
        Info.isMemCpySrc = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else if (isa<DbgInfoIntrinsic>(UI)) {
      // If one user is DbgInfoIntrinsic then check if all users are
      // DbgInfoIntrinsics.
      if (OnlyUsedByDbgInfoIntrinsics(BC)) {
        Info.needsCleanup = true;
        return;
      }
      MarkUnsafe(Info);
    } else {
      return MarkUnsafe(Info);
    }
    if (Info.isUnsafe) return;
  }
}
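
// For illustration (invented IR): storing the whole aggregate through a
// bitcast to an integer pointer of the same allocation size,
//
//   %i = bitcast { i32, i32 }* %p to i64*
//   store i64 %val, i64* %i
//
// is treated like a memcpy into the alloca (isMemCpyDst); the symmetric
// whole-alloca load case is treated as a copy out of it (isMemCpySrc).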

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocaInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    Instruction *User = cast<Instruction>(*UI++);
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      if (BCU->use_empty()) BCU->eraseFromParent();
      continue;
    }

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      // This must be memcpy/memmove/memset of the entire aggregate.
      // Split into one per element.
      RewriteMemIntrinUserOfAlloca(MI, BCInst, AI, NewElts);
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // If this is a store of the entire alloca from an integer, rewrite it.
      RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
      continue;
    }

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // If this is a load of the entire alloca to an integer, rewrite it.
      RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
      continue;
    }

    // Otherwise it must be some other user of a gep of the first pointer.
    // Just leave these alone.
    continue;
  }
}

/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                        AllocaInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {

  // If this is a memcpy/memmove, construct the other pointer as the
  // appropriate type.  The "Other" pointer is the pointer that goes to memory
  // that doesn't have anything to do with the alloca that we are promoting.
  // For memset, this Value* stays null.
  Value *OtherPtr = 0;
  LLVMContext &Context = MI->getContext();
  unsigned MemAlignment = MI->getAlignment();
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcpy
    if (BCInst == MTI->getRawDest())
      OtherPtr = MTI->getRawSource();
    else {
      assert(BCInst == MTI->getRawSource());
      OtherPtr = MTI->getRawDest();
    }
  }

  // Keep track of the other intrinsic argument, so it can be removed if it
  // is dead when the intrinsic is replaced.
  Value *PossiblyDead = OtherPtr;

  // If there is an other pointer, we want to convert it to the same pointer
  // type as AI has, so we can GEP through it safely.
  if (OtherPtr) {
    // It is likely that OtherPtr is a bitcast, if so, remove it.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
      OtherPtr = BC->getOperand(0);
    // All zero GEPs are effectively bitcasts.
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr))
      if (GEP->hasAllZeroIndices())
        OtherPtr = GEP->getOperand(0);

    if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
      if (BCE->getOpcode() == Instruction::BitCast)
        OtherPtr = BCE->getOperand(0);

    // If the pointer is not the right type, insert a bitcast to the right
    // type.
    if (OtherPtr->getType() != AI->getType())
      OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                 MI);
  }

  // Process each element of the aggregate.
  Value *TheFn = MI->getOperand(0);
  const Type *BytePtrTy = MI->getRawDest()->getType();
  bool SROADest = MI->getRawDest() == BCInst;

  Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext()));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // If this is a memcpy/memmove, emit a GEP of the other element address.
    Value *OtherElt = 0;
    unsigned OtherEltAlign = MemAlignment;

    if (OtherPtr) {
      Value *Idx[2] = { Zero,
                     ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) };
      OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
                                           OtherPtr->getNameStr()+"."+Twine(i),
                                           MI);
      uint64_t EltOffset;
      const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
      if (const StructType *ST =
            dyn_cast<StructType>(OtherPtrTy->getElementType())) {
        EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
      } else {
        const Type *EltTy =
          cast<SequentialType>(OtherPtr->getType())->getElementType();
        EltOffset = TD->getTypeAllocSize(EltTy)*i;
      }

      // The alignment of the other pointer is the guaranteed alignment of the
      // element, which is affected by both the known alignment of the whole
      // mem intrinsic and the alignment of the element.  If the alignment of
      // the memcpy (f.e.) is 32 but the element is at a 4-byte offset, then
      // the known alignment is just 4 bytes.
      OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
    }

    Value *EltPtr = NewElts[i];
    const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

    // If we got down to a scalar, insert a load or store as appropriate.
    if (EltTy->isSingleValueType()) {
      if (isa<MemTransferInst>(MI)) {
        if (SROADest) {
          // From Other to Alloca.
          Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
          new StoreInst(Elt, EltPtr, MI);
        } else {
          // From Alloca to Other.
          Value *Elt = new LoadInst(EltPtr, "tmp", MI);
          new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
        }
        continue;
      }
      assert(isa<MemSetInst>(MI));

      // If the stored element is zero (common case), just store a null
      // constant.
      Constant *StoreVal;
      if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
        if (CI->isZero()) {
          StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
        } else {
          // If EltTy is a vector type, get the element type.
          const Type *ValTy = EltTy->getScalarType();

          // Construct an integer with the right value.
          unsigned EltSize = TD->getTypeSizeInBits(ValTy);
          APInt OneVal(EltSize, CI->getZExtValue());
          APInt TotalVal(OneVal);
          // Set each byte.
          for (unsigned i = 0; 8*i < EltSize; ++i) {
            TotalVal = TotalVal.shl(8);
            TotalVal |= OneVal;
          }

          // Convert the integer value to the appropriate type.
          StoreVal = ConstantInt::get(Context, TotalVal);
          if (isa<PointerType>(ValTy))
            StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
          else if (ValTy->isFloatingPoint())
            StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
          assert(StoreVal->getType() == ValTy && "Type mismatch!");

          // If the requested value was a vector constant, create it.
          if (EltTy != ValTy) {
            unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
            SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
            StoreVal = ConstantVector::get(&Elts[0], NumElts);
          }
        }
        new StoreInst(StoreVal, EltPtr, MI);
        continue;
      }
      // Otherwise, if we're storing a byte variable, use a memset call for
      // this element.
    }

    // Cast the element pointer to BytePtrTy.
    if (EltPtr->getType() != BytePtrTy)
      EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

    // Cast the other pointer (if we have one) to BytePtrTy.
    if (OtherElt && OtherElt->getType() != BytePtrTy)
      OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                 MI);

    unsigned EltSize = TD->getTypeAllocSize(EltTy);

    // Finally, insert the meminst for this element.
    if (isa<MemTransferInst>(MI)) {
      Value *Ops[] = {
        SROADest ? EltPtr : OtherElt,  // Dest ptr
        SROADest ? OtherElt : EltPtr,  // Src ptr
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
        // Align
        ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign)
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    } else {
      assert(isa<MemSetInst>(MI));
      Value *Ops[] = {
        EltPtr, MI->getOperand(2),  // Dest, Value,
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
        Zero  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    }
  }
  MI->eraseFromParent();
  if (PossiblyDead)
    RecursivelyDeleteTriviallyDeadInstructions(PossiblyDead);
}
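
// For illustration (invented IR): a whole-aggregate copy over { i32, i32 },
//
//   call void @llvm.memcpy.i32(i8* %dst, i8* %src, i32 8, i32 4)
//
// becomes one copy per element -- here plain load/store pairs, because each
// element is a single value type -- with the per-element alignment reduced
// to MinAlign(4, element offset).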

/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation.  Extract out the pieces of the stored
/// integer and store them individually.
void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts){
  // Extract each element out of the integer according to its structure offset
  // and store the element value to the individual alloca.
  Value *SrcVal = SI->getOperand(0);
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  // If this isn't a store of an integer to the whole alloca, it may be a store
  // to the first element.  Just ignore the store in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(SrcVal->getType()) ||
      TD->getTypeAllocSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    return;
  // Handle tail padding by extending the operand
  if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    SrcVal = new ZExtInst(SrcVal,
                          IntegerType::get(SI->getContext(), AllocaSizeBits),
                          "", SI);

  DEBUG(errs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << '\n' << *SI
               << '\n');

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    const StructLayout *Layout = TD->getStructLayout(EltSTy);

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Get the number of bits to shift SrcVal to get the value.
      const Type *FieldTy = EltSTy->getElementType(i);
      uint64_t Shift = Layout->getElementOffsetInBits(i);

      if (TD->isBigEndian())
        Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

      // Ignore zero sized fields like {}, they obviously contain no data.
      if (FieldSizeBits == 0) continue;

      if (FieldSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                               IntegerType::get(SI->getContext(),
                                                FieldSizeBits), "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == FieldTy) {
        // Storing to an integer field of this size, just do it.
      } else if (FieldTy->isFloatingPoint() || isa<VectorType>(FieldTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);
    }

  } else {
    const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
    const Type *ArrayEltTy = ATy->getElementType();
    uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
    uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);

    uint64_t Shift;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-ElementOffset;
    else
      Shift = 0;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Ignore zero sized fields like {}, they obviously contain no data.
      if (ElementSizeBits == 0) continue;

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      if (ElementSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                               IntegerType::get(SI->getContext(),
                                                ElementSizeBits), "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == ArrayEltTy) {
        // Storing to an integer field of this size, just do it.
      } else if (ArrayEltTy->isFloatingPoint() || isa<VectorType>(ArrayEltTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);

      if (TD->isBigEndian())
        Shift -= ElementOffset;
      else
        Shift += ElementOffset;
    }
  }

  SI->eraseFromParent();
}
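
// For illustration (invented IR, little-endian): storing i64 %v over the
// whole of { i32, i32 } becomes
//
//   %lo = trunc i64 %v to i32
//   store i32 %lo, i32* %p.0
//   %s  = lshr i64 %v, 32
//   %hi = trunc i64 %s to i32
//   store i32 %hi, i32* %p.1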

/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer.  Load the individual pieces to form the aggregate value.
void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // Extract each element out of the NewElts according to its structure offset
  // and form the result value.
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  // If this isn't a load of the whole alloca to an integer, it may be a load
  // of the first element.  Just ignore the load in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(LI->getType()) ||
      TD->getTypeAllocSizeInBits(LI->getType()) != AllocaSizeBits)
    return;

  DEBUG(errs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
               << '\n');

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  const StructLayout *Layout = 0;
  uint64_t ArrayEltBitOffset = 0;
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    Layout = TD->getStructLayout(EltSTy);
  } else {
    const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
    ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
  }

  Value *ResultVal =
    Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // Load the value from the alloca.  If the NewElt is an aggregate, cast
    // the pointer to an integer of the same size before doing the load.
    Value *SrcField = NewElts[i];
    const Type *FieldTy =
      cast<PointerType>(SrcField->getType())->getElementType();
    uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

    // Ignore zero sized fields like {}, they obviously contain no data.
    if (FieldSizeBits == 0) continue;

    const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
                                                     FieldSizeBits);
    if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
        !isa<VectorType>(FieldTy))
      SrcField = new BitCastInst(SrcField,
                                 PointerType::getUnqual(FieldIntTy),
                                 "", LI);
    SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

    // If SrcField is a fp or vector of the right size but that isn't an
    // integer type, bitcast to an integer so we can shift it.
    if (SrcField->getType() != FieldIntTy)
      SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);

    // Zero extend the field to be the same size as the final alloca so that
    // we can shift and insert it.
    if (SrcField->getType() != ResultVal->getType())
      SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);

    // Determine the number of bits to shift SrcField.
    uint64_t Shift;
    if (Layout)  // Struct case.
      Shift = Layout->getElementOffsetInBits(i);
    else  // Array case.
      Shift = i*ArrayEltBitOffset;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();

    if (Shift) {
      Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
      SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
    }

    ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
  }

  // Handle tail padding by truncating the result
  if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
    ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);

  LI->replaceAllUsesWith(ResultVal);
  LI->eraseFromParent();
}
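
// For illustration (invented IR, little-endian): loading an i64 from the
// whole of { i32, i32 } is reassembled from the element allocas:
//
//   %lo = load i32* %p.0
//   %e0 = zext i32 %lo to i64
//   %hi = load i32* %p.1
//   %e1 = zext i32 %hi to i64
//   %s  = shl i64 %e1, 32
//   %v  = or i64 %s, %e0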


/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
static bool HasPadding(const Type *Ty, const TargetData &TD) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    unsigned PrevFieldBitOffset = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

      // Padding in sub-elements?
      if (HasPadding(STy->getElementType(i), TD))
        return true;

      // Check to see if there is any padding between this element and the
      // previous one.
      if (i) {
        unsigned PrevFieldEnd =
          PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
        if (PrevFieldEnd < FieldBitOffset)
          return true;
      }

      PrevFieldBitOffset = FieldBitOffset;
    }

    // Check for tail padding.
    if (unsigned EltCount = STy->getNumElements()) {
      unsigned PrevFieldEnd = PrevFieldBitOffset +
        TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
      if (PrevFieldEnd < SL->getSizeInBits())
        return true;
    }

  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    return HasPadding(ATy->getElementType(), TD);
  } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    return HasPadding(VTy->getElementType(), TD);
  }
  return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
}
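
// For illustration: on a target where i32 is 4-byte aligned, { i8, i32 } has
// three bytes of padding between the fields (the previous field ends at bit 8
// but the next starts at bit 32), and { i32, i8 } has tail padding because
// the struct is rounded up to 8 bytes; such types are what the memcpy
// safety check below is guarding against.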

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
int SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  AllocaInfo Info;

  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info);
    if (Info.isUnsafe) {
      DEBUG(errs() << "Cannot transform: " << *AI << "\n  due to user: "
                   << **I << '\n');
      return 0;
    }
  }

  // Okay, we know all the users are promotable.  If the aggregate is a memcpy
  // source and destination, we have to be careful.  In particular, the memcpy
  // could be moving around elements that live in structure padding of the LLVM
  // types, but may actually be used.  In these cases, we refuse to promote the
  // struct.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getType()->getElementType(), *TD))
    return 0;

  // If we require cleanup, return 1, otherwise return 3.
  return Info.needsCleanup ? 1 : 3;
}

/// CleanupGEP - GEPI is a getelementptr using an alloca; the alloca can be
/// promoted after the GEP is canonicalized here.
void SROA::CleanupGEP(GetElementPtrInst *GEPI) {
  gep_type_iterator I = gep_type_begin(GEPI);
  ++I;

  const ArrayType *AT = dyn_cast<ArrayType>(*I);
  if (!AT)
    return;

  uint64_t NumElements = AT->getNumElements();

  if (isa<ConstantInt>(I.getOperand()))
    return;

  if (NumElements == 1) {
    GEPI->setOperand(2,
                 Constant::getNullValue(Type::getInt32Ty(GEPI->getContext())));
    return;
  }

  assert(NumElements == 2 && "Unhandled case!");
  // All users of the GEP must be loads.  At each use of the GEP, insert
  // two loads of the appropriate indexed GEP and select between them.
  Value *IsOne = new ICmpInst(GEPI, ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                              "isone");
  // Insert the new GEP instructions, which are properly indexed.
  SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
  Indices[1] = Constant::getNullValue(Type::getInt32Ty(GEPI->getContext()));
  Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                             Indices.begin(),
                                             Indices.end(),
                                             GEPI->getName()+".0", GEPI);
  Indices[1] = ConstantInt::get(Type::getInt32Ty(GEPI->getContext()), 1);
  Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                            Indices.begin(),
                                            Indices.end(),
                                            GEPI->getName()+".1", GEPI);
  // Replace all loads of the variable index GEP with loads from both
  // indexes and a select.
  while (!GEPI->use_empty()) {
    LoadInst *LI = cast<LoadInst>(GEPI->use_back());
    Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
    Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
    Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI);
    LI->replaceAllUsesWith(R);
    LI->eraseFromParent();
  }
  GEPI->eraseFromParent();
}
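
// For illustration (invented IR): with %i variable and all users loads,
//
//   %p = getelementptr [2 x i32]* %A, i32 0, i32 %i
//   %v = load i32* %p
//
// becomes
//
//   %isone = icmp ne i32 %i, 0
//   %v.0 = load i32* %p.0
//   %v.1 = load i32* %p.1
//   %v = select i1 %isone, i32 %v.1, i32 %v.0
//
// where %p.0 and %p.1 are the two fixed-index GEPs inserted above.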


/// CleanupAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
void SROA::CleanupAllocaUsers(AllocaInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    User *U = *UI++;
    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U))
      CleanupGEP(GEPI);
    else {
      Instruction *I = cast<Instruction>(U);
      SmallVector<DbgInfoIntrinsic *, 2> DbgInUses;
      if (!isa<StoreInst>(I) && OnlyUsedByDbgInfoIntrinsics(I, &DbgInUses)) {
        // Safe to remove debug info uses.
        while (!DbgInUses.empty()) {
          DbgInfoIntrinsic *DI = DbgInUses.back(); DbgInUses.pop_back();
          DI->eraseFromParent();
        }
        I->eraseFromParent();
      }
    }
  }
}

/// MergeInType - Add the 'In' type to the accumulated vector type (VecTy) so
/// far at the offset specified by Offset (which is specified in bytes).
///
/// There are two cases we handle here:
///   1) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///      This promotes a <4 x float> union with a store of float to the third
///      element into a <4 x float> alloca that uses insertelement.
///   2) A fully general blob of memory, which we turn into some (potentially
///      large) integer type with extract and insert operations where the loads
///      and stores would mutate the memory.
static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
                        unsigned AllocaSize, const TargetData &TD,
                        LLVMContext &Context) {
  // If this could be contributing to a vector, analyze it.
  if (VecTy != Type::getVoidTy(Context)) { // either null or a vector type.

    // If the In type is a vector that is the same size as the alloca, see if
    // it matches the existing VecTy.
    if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
      if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
        // If we're storing/loading a vector of the right size, allow it as a
        // vector.  If this is the first vector we see, remember the type so
        // that we know the element size.
        if (VecTy == 0)
          VecTy = VInTy;
        return;
      }
    } else if (In->isFloatTy() || In->isDoubleTy() ||
               (isa<IntegerType>(In) && In->getPrimitiveSizeInBits() >= 8 &&
                isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
      // If we're accessing something that could be an element of a vector, see
      // if the implied vector agrees with what we already have and if Offset
      // is compatible with it.
      unsigned EltSize = In->getPrimitiveSizeInBits()/8;
      if (Offset % EltSize == 0 &&
          AllocaSize % EltSize == 0 &&
          (VecTy == 0 ||
           cast<VectorType>(VecTy)->getElementType()
                 ->getPrimitiveSizeInBits()/8 == EltSize)) {
        if (VecTy == 0)
          VecTy = VectorType::get(In, AllocaSize/EltSize);
        return;
      }
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized vector
  // form.  We can still turn this into a large integer.
  VecTy = Type::getVoidTy(Context);
}
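
// For illustration: for a 16-byte alloca, a <4 x float> access at offset 0
// fixes VecTy = <4 x float>; later float accesses at offsets 0, 4, 8, or 12
// remain compatible (EltSize = 4 divides both the offset and the alloca
// size), whereas an i16 access at offset 2 would demote the whole analysis
// to the integer fallback (VecTy = void).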

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee and all
/// its accesses to a single vector type, return true and set VecTy to
/// the new type.  If we could convert the alloca into a single promotable
/// integer, return true but set VecTy to VoidTy.  Further, if the use is not a
/// completely trivial use that mem2reg could promote, set IsNotTrivial.  Offset
/// is the current offset from the base of the alloca being analyzed.
///
/// If we see at least one access to the value as a vector type, set the
/// SawVec flag.
bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                              bool &SawVec, uint64_t Offset,
                              unsigned AllocaSize) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // Don't break volatile loads.
      if (LI->isVolatile())
        return false;
      MergeInType(LI->getType(), Offset, VecTy,
                  AllocaSize, *TD, V->getContext());
      SawVec |= isa<VectorType>(LI->getType());
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V || SI->isVolatile()) return false;
      MergeInType(SI->getOperand(0)->getType(), Offset,
                  VecTy, AllocaSize, *TD, V->getContext());
      SawVec |= isa<VectorType>(SI->getOperand(0)->getType());
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (!CanConvertToScalar(BCI, IsNotTrivial, VecTy, SawVec, Offset,
                              AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // If this is a GEP with variable indices, we can't handle it.
      if (!GEP->hasAllConstantIndices())
        return false;

      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      // See if all uses can be converted.
      if (!CanConvertToScalar(GEP, IsNotTrivial, VecTy, SawVec,
                              Offset+GEPOffset, AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // handle it.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      // Store of constant value and constant size.
      if (isa<ConstantInt>(MSI->getValue()) &&
          isa<ConstantInt>(MSI->getLength())) {
        IsNotTrivial = true;
        continue;
      }
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      if (ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength()))
        if (Len->getZExtValue() == AllocaSize && Offset == 0) {
          IsNotTrivial = true;
          continue;
        }
    }

    // Ignore dbg intrinsic.
    if (isa<DbgInfoIntrinsic>(User))
      continue;

    // Otherwise, we cannot handle this!
    return false;
  }

  return true;
}

/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
      GEP->eraseFromParent();
      continue;
    }

    IRBuilder<> Builder(User->getParent(), User);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp");
      Value *NewLoadVal
        = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset,
                                     Builder);
      LI->replaceAllUsesWith(NewLoadVal);
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      // FIXME: Remove once builder has Twine API.
      Value *Old = Builder.CreateLoad(NewAI,
                                      (NewAI->getName()+".in").str().c_str());
      Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
                                             Builder);
      Builder.CreateStore(New, NewAI);
      SI->eraseFromParent();
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // transform it into a store of the expanded constant value.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
      if (NumBytes != 0) {
        unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

        // Compute the value replicated the right number of times.
        APInt APVal(NumBytes*8, Val);

        // Splat the value if non-zero.
        if (Val)
          for (unsigned i = 1; i != NumBytes; ++i)
            APVal |= APVal << 8;

        // FIXME: Remove once builder has Twine API.
        Value *Old = Builder.CreateLoad(NewAI,
                                       (NewAI->getName()+".in").str().c_str());
        Value *New = ConvertScalar_InsertValue(
                                   ConstantInt::get(User->getContext(), APVal),
                                               Old, Offset, Builder);
        Builder.CreateStore(New, NewAI);
      }
      MSI->eraseFromParent();
      continue;
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      assert(Offset == 0 && "must be store to start of alloca");

      // If the source and destination are both the same alloca, then this is
      // a noop copy-to-self, just delete it.  Otherwise, emit a load and store
      // as appropriate.
      AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject());

      if (MTI->getSource()->getUnderlyingObject() != OrigAI) {
        // Dest must be OrigAI, change this to be a load from the original
        // pointer (bitcasted), then a store to our new alloca.
        assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
        Value *SrcPtr = MTI->getSource();
        SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType());

        LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
        SrcVal->setAlignment(MTI->getAlignment());
        Builder.CreateStore(SrcVal, NewAI);
      } else if (MTI->getDest()->getUnderlyingObject() != OrigAI) {
        // Src must be OrigAI, change this to be a load from NewAI then a store
        // through the original dest pointer (bitcasted).
        assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
        LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");

        Value *DstPtr = Builder.CreateBitCast(MTI->getDest(),
                                              NewAI->getType());
        StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
        NewStore->setAlignment(MTI->getAlignment());
      } else {
        // Noop transfer. Src == Dst
      }

      MTI->eraseFromParent();
      continue;
    }

    // If user is a dbg info intrinsic then it is safe to remove it.
    if (isa<DbgInfoIntrinsic>(User)) {
      User->eraseFromParent();
      continue;
    }

    llvm_unreachable("Unsupported operation!");
  }
}
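
// For illustration (invented IR, little-endian): once { i16, i16 } has been
// converted to a single i32 alloca %p, a store of i16 %v to the second field
// becomes a read-modify-write of the scalar, roughly:
//
//   %old = load i32* %p
//   %e   = zext i16 %v to i32
//   %sh  = shl i32 %e, 16
//   %cl  = and i32 %old, 65535      ; keep the low field
//   %new = or i32 %cl, %sh
//   store i32 %new, i32* %p
//
// with the exact bit twiddling produced by ConvertScalar_InsertValue.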
        assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
        Value *SrcPtr = MTI->getSource();
        SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType());

        LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
        SrcVal->setAlignment(MTI->getAlignment());
        Builder.CreateStore(SrcVal, NewAI);
      } else if (MTI->getDest()->getUnderlyingObject() != OrigAI) {
        // Src must be OrigAI, change this to be a load from NewAI then a store
        // through the original dest pointer (bitcasted).
        assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
        LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");

        Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), NewAI->getType());
        StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
        NewStore->setAlignment(MTI->getAlignment());
      } else {
        // No-op transfer: Src == Dst.
      }

      MTI->eraseFromParent();
      continue;
    }

    // If the user is a dbg info intrinsic, it is safe to remove it.
    if (isa<DbgInfoIntrinsic>(User)) {
      User->eraseFromParent();
      continue;
    }

    llvm_unreachable("Unsupported operation!");
  }
}

/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
/// or vector value FromVal, extracting the bits from the offset specified by
/// Offset. This returns the value, which is of type ToType.
///
/// This happens when we are converting an "integer union" to a single
/// integer scalar, or when we are converting a "vector union" to a vector with
/// insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
                                        uint64_t Offset, IRBuilder<> &Builder) {
  // If the load is of the whole new alloca, no conversion is needed.
  if (FromVal->getType() == ToType && Offset == 0)
    return FromVal;

  // If the result alloca is a vector type, this is either an element
  // access or a bitcast to another vector type of the same size.
  if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) {
    if (isa<VectorType>(ToType))
      return Builder.CreateBitCast(FromVal, ToType, "tmp");

    // Otherwise it must be an element access.
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
    }
    // Return the element extracted out of it.
    Value *V = Builder.CreateExtractElement(FromVal, ConstantInt::get(
                    Type::getInt32Ty(FromVal->getContext()), Elt), "tmp");
    if (V->getType() != ToType)
      V = Builder.CreateBitCast(V, ToType, "tmp");
    return V;
  }

  // If ToType is a first class aggregate, extract out each of the pieces and
  // use insertvalue instructions to form the FCA.
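  //
  // For example (illustrative, assuming a little-endian target), extracting a
  // {i32, i32} at offset 0 from an i64 %val recursively produces roughly:
  //   %lo = trunc i64 %val to i32
  //   %sh = lshr i64 %val, 32
  //   %hi = trunc i64 %sh to i32
  //   %a  = insertvalue {i32, i32} undef, i32 %lo, 0
  //   %b  = insertvalue {i32, i32} %a, i32 %hi, 1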
  if (const StructType *ST = dyn_cast<StructType>(ToType)) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    Value *Res = UndefValue::get(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
                                        Offset+Layout.getElementOffsetInBits(i),
                                              Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
    uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
    Value *Res = UndefValue::get(AT);
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
                                              Offset+i*EltSize, Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }

  // Otherwise, this must be a union that was converted to an integer value.
  const IntegerType *NTy = cast<IntegerType>(FromVal->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits. This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = TD->getTypeStoreSizeInBits(NTy) -
            TD->getTypeStoreSizeInBits(ToType) - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative shift amounts (with shl), which are not defined.
  // We do this to support (e.g.) loads off the end of a structure where
  // only some bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateLShr(FromVal,
                                 ConstantInt::get(FromVal->getType(),
                                                  ShAmt), "tmp");
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateShl(FromVal,
                                ConstantInt::get(FromVal->getType(),
                                                 -ShAmt), "tmp");

  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = TD->getTypeSizeInBits(ToType);
  if (LIBitWidth < NTy->getBitWidth())
    FromVal =
      Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
                                                    LIBitWidth), "tmp");
  else if (LIBitWidth > NTy->getBitWidth())
    FromVal =
      Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
                                                   LIBitWidth), "tmp");

  // If the result is an integer, this is a trunc or bitcast.
  if (isa<IntegerType>(ToType)) {
    // Should be done.
  } else if (ToType->isFloatingPoint() || isa<VectorType>(ToType)) {
    // Just do a bitcast, we know the sizes match up.
    FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
  } else {
    // Otherwise must be a pointer.
    FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp");
  }
  assert(FromVal->getType() == ToType && "Didn't convert right?");
  return FromVal;
}

/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
/// or vector value "Old" at the offset specified by Offset.
///
/// This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
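///
/// For example (illustrative, assuming a little-endian target), inserting an
/// i16 value at bit offset 16 of an i64 computes roughly:
///   New = (Old & ~(i64 0xFFFF << 16)) | (zext(SV) << 16)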
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
                                       uint64_t Offset, IRBuilder<> &Builder) {

  // Convert the stored type to the actual type, shift it left into position,
  // then 'or' it into place.
  const Type *AllocaType = Old->getType();
  LLVMContext &Context = Old->getContext();

  if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    uint64_t VecSize = TD->getTypeAllocSizeInBits(VTy);
    uint64_t ValSize = TD->getTypeAllocSizeInBits(SV->getType());

    // Changing the whole vector with memset or with an access of a different
    // vector type?
    if (ValSize == VecSize)
      return Builder.CreateBitCast(SV, AllocaType, "tmp");

    uint64_t EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());

    // Must be an element insertion.
    unsigned Elt = Offset/EltSize;

    if (SV->getType() != VTy->getElementType())
      SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");

    SV = Builder.CreateInsertElement(Old, SV,
                     ConstantInt::get(Type::getInt32Ty(SV->getContext()), Elt),
                                     "tmp");
    return SV;
  }

  // If SV is a first-class aggregate value, insert each value recursively.
  if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old,
                                      Offset+Layout.getElementOffsetInBits(i),
                                      Builder);
    }
    return Old;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
    uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
    }
    return Old;
  }

  // If SV is a float, convert it to the appropriate integer type.
  // If it is a pointer, do the same.
  unsigned SrcWidth = TD->getTypeSizeInBits(SV->getType());
  unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
  if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
    SV = Builder.CreateBitCast(SV,
                           IntegerType::get(SV->getContext(), SrcWidth), "tmp");
  else if (isa<PointerType>(SV->getType()))
    SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(SV->getContext()), "tmp");

  // Zero extend or truncate the value if needed.
  if (SV->getType() != AllocaType) {
    if (SV->getType()->getPrimitiveSizeInBits() <
        AllocaType->getPrimitiveSizeInBits())
      SV = Builder.CreateZExt(SV, AllocaType, "tmp");
    else {
      // Truncation may be needed if storing more than the alloca can hold
      // (undefined behavior).
      SV = Builder.CreateTrunc(SV, AllocaType, "tmp");
      SrcWidth = DestWidth;
      SrcStoreWidth = DestStoreWidth;
    }
  }

  // If this is a big-endian system and the store is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
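  //
  // Illustrative example: storing an i8 at byte offset 0 of an i32 alloca on
  // a big-endian target gives ShAmt = 32 - 8 - 0 = 24, placing the byte in
  // the most significant position.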
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits. This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative shift amounts (with shr), which are not defined.
  // We do this to support (e.g.) stores off the end of a structure where
  // only some bits in the structure are set.
  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(),
                                                ShAmt), "tmp");
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(),
                                                 -ShAmt), "tmp");
    Mask = Mask.lshr(-ShAmt);
  }

  // Mask out the bits we are about to insert from the old value, and 'or'
  // in the new bits.
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask");
    SV = Builder.CreateOr(Old, SV, "ins");
  }
  return SV;
}


/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, and return false if
/// we see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset) but otherwise
/// traverse the uses. If we see a memcpy/memmove that targets an unoffsetted
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
      // Ignore non-volatile loads, they are always ok.
      if (!LI->isVolatile())
        continue;

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer. If it
      // doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                         isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemTransferInst>(*UI))
      return false;

    // If we have already seen a copy, reject a second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe. Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the single copy instruction if the
/// specified alloca is only modified by a copy from a constant global, or null
/// otherwise. If we can prove this, we can replace any uses of the alloca with
/// uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocaInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}
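
// For example (illustrative), given a constant global @G, a pattern roughly
// like:
//
//   %A = alloca %T
//   call void @llvm.memcpy(... %A as dest, ... @G as source, ...)
//   ... non-volatile reads through %A ...
//
// lets the pass forward @G into the reads and delete %A entirely.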