ScalarReplAggregates.cpp revision 88fe1ad187041e2ca636e0f86204e30fc6e14300
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// they often interact, especially for C++ programs.  As such, iterating
// between SRoA and Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

namespace {
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    static char ID; // Pass identification, replacement for typeid
    explicit SROA(signed T = -1) : FunctionPass(&ID) {
      if (T == -1)
        SRThreshold = 128;
      else
        SRThreshold = T;
    }

    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass does not require any passes, but we know it
    // will not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    TargetData *TD;

    /// AllocaInfo - When analyzing uses of an alloca instruction, this
    /// captures information about the uses.  All these fields are initialized
    /// to false and set to true when something is learned.
    struct AllocaInfo {
      /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
      bool isUnsafe : 1;

      /// needsCleanup - This is set to true if there is some use of the
      /// alloca that requires cleanup.
      bool needsCleanup : 1;

      /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
      bool isMemCpySrc : 1;

      /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
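      /// For example, the alloca appears as the destination operand of an
      /// llvm.memcpy call.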
      bool isMemCpyDst : 1;

      AllocaInfo()
        : isUnsafe(false), needsCleanup(false),
          isMemCpySrc(false), isMemCpyDst(false) {}
    };

    unsigned SRThreshold;

    void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

    int isSafeAllocaToScalarRepl(AllocationInst *AI);

    void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                               AllocaInfo &Info);
    void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                          AllocaInfo &Info);
    void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                        unsigned OpNo, AllocaInfo &Info);
    void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
                                        AllocaInfo &Info);

    void DoScalarReplacement(AllocationInst *AI,
                             std::vector<AllocationInst*> &WorkList);
    void CleanupGEP(GetElementPtrInst *GEP);
    void CleanupAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                      AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI,
                                       SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);

    bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                            bool &SawVec, uint64_t Offset, unsigned AllocaSize);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
    Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
                                      uint64_t Offset, IRBuilder<> &Builder);
    Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
                                     uint64_t Offset, IRBuilder<> &Builder);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
  };
}

char SROA::ID = 0;
static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}


bool SROA::runOnFunction(Function &F) {
  TD = &getAnalysis<TargetData>();

  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions
    // in the entry node.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
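        // An alloca is promotable (per mem2reg's isAllocaPromotable) roughly
        // when its only users are direct, non-volatile loads and stores of
        // the whole value.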
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}

/// getNumSAElements - Return the number of elements in the specified struct
/// or array.
static uint64_t getNumSAElements(const Type *T) {
  if (const StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements();
  return cast<ArrayType>(T)->getNumElements();
}

// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function,
// removing them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any alloca's and mallocs to the
  // worklist.
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist.
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // If this alloca is impossible for us to promote, reject it early.
    if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
      continue;

    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global.  If this is the case, we can change all users
    // to use the constant global instead.  This is commonly produced by the
    // CFE by constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }"
    // if 'A' is only subsequently read.
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DOUT << "Found alloca equal to global: " << *AI;
      DOUT << "  memcpy = " << *TheCopy;
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    uint64_t AllocaSize = TD->getTypePaddedSize(AI->getAllocatedType());

    // Do not promote any struct whose size is too big.
    if (AllocaSize > SRThreshold) continue;

    if ((isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType())) &&
        // Do not promote any struct into more than "32" separate vars.
        getNumSAElements(AI->getAllocatedType()) < SRThreshold/4) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: assert(0 && "Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CleanupAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
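        // Illustrative example: "%X = alloca {i32, float}" whose only users
        // are "getelementptr %X, 0, <cst>" becomes two allocas,
        // "%X.0 = alloca i32" and "%X.1 = alloca float".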
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // If we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register value, do so.
    // IsNotTrivial tracks whether this is something that mem2reg could have
    // promoted itself.  If so, we don't want to transform it needlessly.  Note
    // that we can't just check based on the type: the alloca may be of an i32
    // but that has pointer arithmetic to set byte 3 of it or something.
    bool IsNotTrivial = false;
    const Type *VectorTy = 0;
    bool HadAVector = false;
    if (CanConvertToScalar(AI, IsNotTrivial, VectorTy, HadAVector,
                           0, unsigned(AllocaSize)) && IsNotTrivial) {
      AllocaInst *NewAI;
      // If we were able to find a vector type that can handle this with
      // insert/extract elements, and if there was at least one use that had
      // a vector type, promote this to a vector.  We don't want to promote
      // random stuff that doesn't use vectors (e.g. <9 x double>) because then
      // we just get a lot of insert/extracts.  If at least one vector is
      // involved, then we probably really do have a union of vector/array.
      if (VectorTy && isa<VectorType>(VectorTy) && HadAVector) {
        DOUT << "CONVERT TO VECTOR: " << *AI << "  TYPE = " << *VectorTy <<"\n";

        // Create and insert the vector alloca.
        NewAI = new AllocaInst(VectorTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      } else {
        DOUT << "CONVERT TO SCALAR INTEGER: " << *AI << "\n";

        // Create and insert the integer alloca.
        const Type *NewTy = IntegerType::get(AllocaSize*8);
        NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      }
      NewAI->takeName(AI);
      AI->eraseFromParent();
      ++NumConverted;
      Changed = true;
      continue;
    }

    // Otherwise, we couldn't process this alloca.
  }

  return Changed;
}

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate, so do SROA now.
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DOUT << "Found inst to SROA: " << *AI;
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
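  // For example (illustrative IR), "getelementptr {i32, float}* %X, i32 0,
  // i32 1" is rewritten to refer to the new element alloca %X.1 directly.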
  //
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    // Replace:
    //   %res = load { i32, i32 }* %alloc
    // with:
    //   %load.0 = load i32* %alloc.0
    //   %insert.0 = insertvalue { i32, i32 } undef, i32 %load.0, 0
    //   %load.1 = load i32* %alloc.1
    //   %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
    // (Also works for arrays instead of structs)
    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *Insert = UndefValue::get(LI->getType());
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Load = new LoadInst(ElementAllocas[i], "load", LI);
        Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);
      }
      LI->replaceAllUsesWith(Insert);
      LI->eraseFromParent();
      continue;
    }

    // Replace:
    //   store { i32, i32 } %val, { i32, i32 }* %alloc
    // with:
    //   %val.0 = extractvalue { i32, i32 } %val, 0
    //   store i32 %val.0, i32* %alloc.0
    //   %val.1 = extractvalue { i32, i32 } %val, 1
    //   store i32 %val.1, i32* %alloc.1
    // (Also works for arrays instead of structs)
    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI);
        new StoreInst(Extract, ElementAllocas[i], SI);
      }
      SI->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
       (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelementptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      //
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(),
                                           NewArgs.end(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0 && GEPI->hasAllZeroIndices())
      RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);

    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP.
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction.
  AI->eraseFromParent();
  NumReplaced++;
}


/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
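/// For example, loading an element is fine, but storing Ptr itself somewhere
/// would let the element address escape and is rejected.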
445/// 446void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI, 447 AllocaInfo &Info) { 448 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 449 I != E; ++I) { 450 Instruction *User = cast<Instruction>(*I); 451 switch (User->getOpcode()) { 452 case Instruction::Load: break; 453 case Instruction::Store: 454 // Store is ok if storing INTO the pointer, not storing the pointer 455 if (User->getOperand(0) == Ptr) return MarkUnsafe(Info); 456 break; 457 case Instruction::GetElementPtr: { 458 GetElementPtrInst *GEP = cast<GetElementPtrInst>(User); 459 bool AreAllZeroIndices = isFirstElt; 460 if (GEP->getNumOperands() > 1) { 461 if (!isa<ConstantInt>(GEP->getOperand(1)) || 462 !cast<ConstantInt>(GEP->getOperand(1))->isZero()) 463 // Using pointer arithmetic to navigate the array. 464 return MarkUnsafe(Info); 465 466 if (AreAllZeroIndices) 467 AreAllZeroIndices = GEP->hasAllZeroIndices(); 468 } 469 isSafeElementUse(GEP, AreAllZeroIndices, AI, Info); 470 if (Info.isUnsafe) return; 471 break; 472 } 473 case Instruction::BitCast: 474 if (isFirstElt) { 475 isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info); 476 if (Info.isUnsafe) return; 477 break; 478 } 479 DOUT << " Transformation preventing inst: " << *User; 480 return MarkUnsafe(Info); 481 case Instruction::Call: 482 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { 483 if (isFirstElt) { 484 isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info); 485 if (Info.isUnsafe) return; 486 break; 487 } 488 } 489 DOUT << " Transformation preventing inst: " << *User; 490 return MarkUnsafe(Info); 491 default: 492 DOUT << " Transformation preventing inst: " << *User; 493 return MarkUnsafe(Info); 494 } 495 } 496 return; // All users look ok :) 497} 498 499/// AllUsersAreLoads - Return true if all users of this value are loads. 500static bool AllUsersAreLoads(Value *Ptr) { 501 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 502 I != E; ++I) 503 if (cast<Instruction>(*I)->getOpcode() != Instruction::Load) 504 return false; 505 return true; 506} 507 508/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an 509/// aggregate allocation. 510/// 511void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI, 512 AllocaInfo &Info) { 513 if (BitCastInst *C = dyn_cast<BitCastInst>(User)) 514 return isSafeUseOfBitCastedAllocation(C, AI, Info); 515 516 if (LoadInst *LI = dyn_cast<LoadInst>(User)) 517 if (!LI->isVolatile()) 518 return;// Loads (returning a first class aggregrate) are always rewritable 519 520 if (StoreInst *SI = dyn_cast<StoreInst>(User)) 521 if (!SI->isVolatile() && SI->getOperand(0) != AI) 522 return;// Store is ok if storing INTO the pointer, not storing the pointer 523 524 GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User); 525 if (GEPI == 0) 526 return MarkUnsafe(Info); 527 528 gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI); 529 530 // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>". 531 if (I == E || 532 I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) { 533 return MarkUnsafe(Info); 534 } 535 536 ++I; 537 if (I == E) return MarkUnsafe(Info); // ran out of GEP indices?? 538 539 bool IsAllZeroIndices = true; 540 541 // If the first index is a non-constant index into an array, see if we can 542 // handle it as a special case. 
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    if (!isa<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices = false;
      uint64_t NumElements = AT->getNumElements();

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements
      // in it, in which case we CAN promote it, but we have to canonicalize
      // this out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI)) {
        Info.needsCleanup = true;
        return;  // Canonicalization required!
      }
      return MarkUnsafe(Info);
    }
  }

  // Walk through the GEP type indices, checking the types that this indexes
  // into.
  for (; I != E; ++I) {
    // Ignore struct elements; no extra checking is needed for these.
    if (isa<StructType>(*I))
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
    if (!IdxVal) return MarkUnsafe(Info);

    // Are all indices still zero?
    IsAllZeroIndices &= IdxVal->isZero();

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      // This GEP indexes an array.  Verify that this is an in-range constant
      // integer.  Specifically, consider A[0][i].  We cannot know that the
      // user isn't doing invalid things like allowing i to index an
      // out-of-range subscript that accesses A[1].  Because of this, we have
      // to reject SROA of any accesses into structs where any of the
      // components are variables.
      if (IdxVal->getZExtValue() >= AT->getNumElements())
        return MarkUnsafe(Info);
    } else if (const VectorType *VT = dyn_cast<VectorType>(*I)) {
      if (IdxVal->getZExtValue() >= VT->getNumElements())
        return MarkUnsafe(Info);
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to
  // reject them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info);
}

/// isSafeMemIntrinsicOnAllocation - Check whether the specified memory
/// intrinsic can be promoted by SROA; if not, mark the alloca unsafe in
/// Info.  At this point, we know that the operand of the memintrinsic is a
/// pointer to the beginning of the allocation.
void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                          unsigned OpNo, AllocaInfo &Info) {
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return MarkUnsafe(Info);

  // If not the whole aggregate, give up.
  if (Length->getZExtValue() !=
      TD->getTypePaddedSize(AI->getType()->getElementType()))
    return MarkUnsafe(Info);

  // We only know about memcpy/memset/memmove.
  if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
    return MarkUnsafe(Info);

  // Otherwise, we can transform it.  Determine whether this is a memcpy/set
  // into or out of the aggregate.
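  // For memcpy/memmove in this intrinsic form, call operand 0 is the callee,
  // operand 1 the destination pointer, and operand 2 the source pointer, so
  // OpNo tells us which side of the copy the alloca is on.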
  if (OpNo == 1)
    Info.isMemCpyDst = true;
  else {
    assert(OpNo == 2);
    Info.isMemCpySrc = true;
  }
}

/// isSafeUseOfBitCastedAllocation - Check whether all users of this bitcast
/// are safe uses of the alloca; if not, mark the alloca unsafe in Info.
void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
                                          AllocaInfo &Info) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      isSafeUseOfBitCastedAllocation(BCU, AI, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      if (SI->isVolatile())
        return MarkUnsafe(Info);

      // If storing the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and store through it.  This is similar to
      // the memcpy case and occurs in various "byval" cases and emulated
      // memcpys.
      if (isa<IntegerType>(SI->getOperand(0)->getType()) &&
          TD->getTypePaddedSize(SI->getOperand(0)->getType()) ==
          TD->getTypePaddedSize(AI->getType()->getElementType())) {
        Info.isMemCpyDst = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
      if (LI->isVolatile())
        return MarkUnsafe(Info);

      // If loading the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and load through it.  This is similar to
      // the memcpy case and occurs in various "byval" cases and emulated
      // memcpys.
      if (isa<IntegerType>(LI->getType()) &&
          TD->getTypePaddedSize(LI->getType()) ==
          TD->getTypePaddedSize(AI->getType()->getElementType())) {
        Info.isMemCpySrc = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else if (isa<DbgInfoIntrinsic>(UI)) {
      // If one user is a DbgInfoIntrinsic, then check whether all users are
      // DbgInfoIntrinsics.
      if (OnlyUsedByDbgInfoIntrinsics(BC)) {
        Info.needsCleanup = true;
        return;
      }
      MarkUnsafe(Info);
    } else {
      return MarkUnsafe(Info);
    }
    if (Info.isUnsafe) return;
  }
}

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    Instruction *User = cast<Instruction>(*UI++);
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      if (BCU->use_empty()) BCU->eraseFromParent();
      continue;
    }

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      // This must be memcpy/memmove/memset of the entire aggregate.
      // Split into one per element.
      RewriteMemIntrinUserOfAlloca(MI, BCInst, AI, NewElts);
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // If this is a store of the entire alloca from an integer, rewrite it.
      RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
      continue;
    }

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // If this is a load of the entire alloca to an integer, rewrite it.
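      // e.g. "%v = load i64* %p" where %p is the {i32, i32} alloca bitcast
      // to i64*.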
      RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
      continue;
    }

    // Otherwise it must be some other user of a gep of the first pointer.
    // Just leave these alone.
    continue;
  }
}

/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                        AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {

  // If this is a memcpy/memmove, construct the other pointer as the
  // appropriate type.  The "Other" pointer is the pointer that goes to memory
  // that doesn't have anything to do with the alloca that we are promoting.
  // For memset, this Value* stays null.
  Value *OtherPtr = 0;
  unsigned MemAlignment = MI->getAlignment()->getZExtValue();
  if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
    if (BCInst == MCI->getRawDest())
      OtherPtr = MCI->getRawSource();
    else {
      assert(BCInst == MCI->getRawSource());
      OtherPtr = MCI->getRawDest();
    }
  } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
    if (BCInst == MMI->getRawDest())
      OtherPtr = MMI->getRawSource();
    else {
      assert(BCInst == MMI->getRawSource());
      OtherPtr = MMI->getRawDest();
    }
  }

  // If there is an other pointer, we want to convert it to the same pointer
  // type as AI has, so we can GEP through it safely.
  if (OtherPtr) {
    // It is likely that OtherPtr is a bitcast; if so, remove it.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
      OtherPtr = BC->getOperand(0);
    // All-zero GEPs are effectively bitcasts.
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr))
      if (GEP->hasAllZeroIndices())
        OtherPtr = GEP->getOperand(0);

    if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
      if (BCE->getOpcode() == Instruction::BitCast)
        OtherPtr = BCE->getOperand(0);

    // If the pointer is not the right type, insert a bitcast to the right
    // type.
    if (OtherPtr->getType() != AI->getType())
      OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                 MI);
  }

  // Process each element of the aggregate.
  Value *TheFn = MI->getOperand(0);
  const Type *BytePtrTy = MI->getRawDest()->getType();
  bool SROADest = MI->getRawDest() == BCInst;

  Constant *Zero = Constant::getNullValue(Type::Int32Ty);

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // If this is a memcpy/memmove, emit a GEP of the other element address.
    Value *OtherElt = 0;
    unsigned OtherEltAlign = MemAlignment;

    if (OtherPtr) {
      Value *Idx[2] = { Zero, ConstantInt::get(Type::Int32Ty, i) };
      OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
                                           OtherPtr->getNameStr()+"."+utostr(i),
                                           MI);
      uint64_t EltOffset;
      const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
      if (const StructType *ST =
            dyn_cast<StructType>(OtherPtrTy->getElementType())) {
        EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
      } else {
        const Type *EltTy =
          cast<SequentialType>(OtherPtr->getType())->getElementType();
        EltOffset = TD->getTypePaddedSize(EltTy)*i;
      }

      // The alignment of the other pointer is the guaranteed alignment of the
      // element, which is affected by both the known alignment of the whole
      // mem intrinsic and the alignment of the element.
      // If the alignment of the memcpy (f.e.) is 32 but the element is at a
      // 4-byte offset, then the known alignment is just 4 bytes.
      OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
    }

    Value *EltPtr = NewElts[i];
    const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

    // If we got down to a scalar, insert a load or store as appropriate.
    if (EltTy->isSingleValueType()) {
      if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
        if (SROADest) {
          // From Other to Alloca.
          Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
          new StoreInst(Elt, EltPtr, MI);
        } else {
          // From Alloca to Other.
          Value *Elt = new LoadInst(EltPtr, "tmp", MI);
          new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
        }
        continue;
      }
      assert(isa<MemSetInst>(MI));

      // If the stored element is zero (common case), just store a null
      // constant.
      Constant *StoreVal;
      if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
        if (CI->isZero()) {
          StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
        } else {
          // If EltTy is a vector type, get the element type.
          const Type *ValTy = EltTy;
          if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
            ValTy = VTy->getElementType();

          // Construct an integer with the right value.
          unsigned EltSize = TD->getTypeSizeInBits(ValTy);
          APInt OneVal(EltSize, CI->getZExtValue());
          APInt TotalVal(OneVal);
          // Set each byte.
          for (unsigned i = 0; 8*i < EltSize; ++i) {
            TotalVal = TotalVal.shl(8);
            TotalVal |= OneVal;
          }

          // Convert the integer value to the appropriate type.
          StoreVal = ConstantInt::get(TotalVal);
          if (isa<PointerType>(ValTy))
            StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
          else if (ValTy->isFloatingPoint())
            StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
          assert(StoreVal->getType() == ValTy && "Type mismatch!");

          // If the requested value was a vector constant, create it.
          if (EltTy != ValTy) {
            unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
            SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
            StoreVal = ConstantVector::get(&Elts[0], NumElts);
          }
        }
        new StoreInst(StoreVal, EltPtr, MI);
        continue;
      }
      // Otherwise, if we're storing a byte variable, use a memset call for
      // this element.
    }

    // Cast the element pointer to BytePtrTy.
    if (EltPtr->getType() != BytePtrTy)
      EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

    // Cast the other pointer (if we have one) to BytePtrTy.
    if (OtherElt && OtherElt->getType() != BytePtrTy)
      OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                 MI);

    unsigned EltSize = TD->getTypePaddedSize(EltTy);

    // Finally, insert the meminst for this element.
    if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
      Value *Ops[] = {
        SROADest ? EltPtr : OtherElt,  // Dest ptr
        SROADest ? OtherElt : EltPtr,  // Src ptr
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
        ConstantInt::get(Type::Int32Ty, OtherEltAlign)  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    } else {
      assert(isa<MemSetInst>(MI));
      Value *Ops[] = {
        EltPtr, MI->getOperand(2),  // Dest, Value,
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
        Zero  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    }
  }
  MI->eraseFromParent();
}

/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation.  Extract out the pieces of the stored
/// integer and store them individually.
void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
                                         AllocationInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts){
  // Extract each element out of the integer according to its structure offset
  // and store the element value to the individual alloca.
  Value *SrcVal = SI->getOperand(0);
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);

  // If this isn't a store of an integer to the whole alloca, it may be a
  // store to the first element.  Just ignore the store in this case and
  // normal SROA will handle it.
  if (!isa<IntegerType>(SrcVal->getType()) ||
      TD->getTypePaddedSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    return;

  DOUT << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << *SI;

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    const StructLayout *Layout = TD->getStructLayout(EltSTy);

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Get the number of bits to shift SrcVal to get the value.
      const Type *FieldTy = EltSTy->getElementType(i);
      uint64_t Shift = Layout->getElementOffsetInBits(i);

      if (TD->isBigEndian())
        Shift = AllocaSizeBits-Shift-TD->getTypePaddedSizeInBits(FieldTy);

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

      // Ignore zero-sized fields like {}; they obviously contain no data.
      if (FieldSizeBits == 0) continue;

      if (FieldSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal, IntegerType::get(FieldSizeBits), "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == FieldTy) {
        // Storing to an integer field of this size, just do it.
      } else if (FieldTy->isFloatingPoint() || isa<VectorType>(FieldTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
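        // e.g. a nested {i16, i16} field receives its 32 bits through an
        // i32* cast of its element alloca.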
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);
    }

  } else {
    const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
    const Type *ArrayEltTy = ATy->getElementType();
    uint64_t ElementOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
    uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);

    uint64_t Shift;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-ElementOffset;
    else
      Shift = 0;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Ignore zero-sized fields like {}; they obviously contain no data.
      if (ElementSizeBits == 0) continue;

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      if (ElementSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal, IntegerType::get(ElementSizeBits),"",SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == ArrayEltTy) {
        // Storing to an integer field of this size, just do it.
      } else if (ArrayEltTy->isFloatingPoint() || isa<VectorType>(ArrayEltTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);

      if (TD->isBigEndian())
        Shift -= ElementOffset;
      else
        Shift += ElementOffset;
    }
  }

  SI->eraseFromParent();
}

/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer.  Load the individual pieces to form the aggregate value.
void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // Extract each element out of the NewElts according to its structure offset
  // and form the result value.
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);

  // If this isn't a load of the whole alloca to an integer, it may be a load
  // of the first element.  Just ignore the load in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(LI->getType()) ||
      TD->getTypePaddedSizeInBits(LI->getType()) != AllocaSizeBits)
    return;

  DOUT << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << *LI;

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  const StructLayout *Layout = 0;
  uint64_t ArrayEltBitOffset = 0;
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    Layout = TD->getStructLayout(EltSTy);
  } else {
    const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
    ArrayEltBitOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
  }

  Value *ResultVal = Constant::getNullValue(LI->getType());

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // Load the value from the alloca.  If the NewElt is an aggregate, cast
    // the pointer to an integer of the same size before doing the load.
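    // Each piece loaded below is zero-extended, shifted into position, and
    // OR'd into ResultVal to rebuild the integer.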
    Value *SrcField = NewElts[i];
    const Type *FieldTy =
      cast<PointerType>(SrcField->getType())->getElementType();
    uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

    // Ignore zero-sized fields like {}; they obviously contain no data.
    if (FieldSizeBits == 0) continue;

    const IntegerType *FieldIntTy = IntegerType::get(FieldSizeBits);
    if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
        !isa<VectorType>(FieldTy))
      SrcField = new BitCastInst(SrcField, PointerType::getUnqual(FieldIntTy),
                                 "", LI);
    SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

    // If SrcField is a fp or vector of the right size but that isn't an
    // integer type, bitcast to an integer so we can shift it.
    if (SrcField->getType() != FieldIntTy)
      SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);

    // Zero extend the field to be the same size as the final alloca so that
    // we can shift and insert it.
    if (SrcField->getType() != ResultVal->getType())
      SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);

    // Determine the number of bits to shift SrcField.
    uint64_t Shift;
    if (Layout) // Struct case.
      Shift = Layout->getElementOffsetInBits(i);
    else // Array case.
      Shift = i*ArrayEltBitOffset;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();

    if (Shift) {
      Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
      SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
    }

    ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
  }

  LI->replaceAllUsesWith(ResultVal);
  LI->eraseFromParent();
}


/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
static bool HasPadding(const Type *Ty, const TargetData &TD) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    unsigned PrevFieldBitOffset = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

      // Padding in sub-elements?
      if (HasPadding(STy->getElementType(i), TD))
        return true;

      // Check to see if there is any padding between this element and the
      // previous one.
      if (i) {
        unsigned PrevFieldEnd =
          PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
        if (PrevFieldEnd < FieldBitOffset)
          return true;
      }

      PrevFieldBitOffset = FieldBitOffset;
    }

    // Check for tail padding.
    if (unsigned EltCount = STy->getNumElements()) {
      unsigned PrevFieldEnd = PrevFieldBitOffset +
        TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
      if (PrevFieldEnd < SL->getSizeInBits())
        return true;
    }

  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    return HasPadding(ATy->getElementType(), TD);
  } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    return HasPadding(VTy->getElementType(), TD);
  }
  return TD.getTypeSizeInBits(Ty) != TD.getTypePaddedSizeInBits(Ty);
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
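/// (The value 2 is unused; the caller in performScalarRepl switches over
/// 0, 1, and 3 only.)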
1140/// 1141int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) { 1142 // Loop over the use list of the alloca. We can only transform it if all of 1143 // the users are safe to transform. 1144 AllocaInfo Info; 1145 1146 for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); 1147 I != E; ++I) { 1148 isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info); 1149 if (Info.isUnsafe) { 1150 DOUT << "Cannot transform: " << *AI << " due to user: " << **I; 1151 return 0; 1152 } 1153 } 1154 1155 // Okay, we know all the users are promotable. If the aggregate is a memcpy 1156 // source and destination, we have to be careful. In particular, the memcpy 1157 // could be moving around elements that live in structure padding of the LLVM 1158 // types, but may actually be used. In these cases, we refuse to promote the 1159 // struct. 1160 if (Info.isMemCpySrc && Info.isMemCpyDst && 1161 HasPadding(AI->getType()->getElementType(), *TD)) 1162 return 0; 1163 1164 // If we require cleanup, return 1, otherwise return 3. 1165 return Info.needsCleanup ? 1 : 3; 1166} 1167 1168/// CleanupGEP - GEP is used by an Alloca, which can be prompted after the GEP 1169/// is canonicalized here. 1170void SROA::CleanupGEP(GetElementPtrInst *GEPI) { 1171 gep_type_iterator I = gep_type_begin(GEPI); 1172 ++I; 1173 1174 const ArrayType *AT = dyn_cast<ArrayType>(*I); 1175 if (!AT) 1176 return; 1177 1178 uint64_t NumElements = AT->getNumElements(); 1179 1180 if (isa<ConstantInt>(I.getOperand())) 1181 return; 1182 1183 if (NumElements == 1) { 1184 GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty)); 1185 return; 1186 } 1187 1188 assert(NumElements == 2 && "Unhandled case!"); 1189 // All users of the GEP must be loads. At each use of the GEP, insert 1190 // two loads of the appropriate indexed GEP and select between them. 1191 Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(), 1192 Constant::getNullValue(I.getOperand()->getType()), 1193 "isone", GEPI); 1194 // Insert the new GEP instructions, which are properly indexed. 1195 SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end()); 1196 Indices[1] = Constant::getNullValue(Type::Int32Ty); 1197 Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0), 1198 Indices.begin(), 1199 Indices.end(), 1200 GEPI->getName()+".0", GEPI); 1201 Indices[1] = ConstantInt::get(Type::Int32Ty, 1); 1202 Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0), 1203 Indices.begin(), 1204 Indices.end(), 1205 GEPI->getName()+".1", GEPI); 1206 // Replace all loads of the variable index GEP with loads from both 1207 // indexes and a select. 1208 while (!GEPI->use_empty()) { 1209 LoadInst *LI = cast<LoadInst>(GEPI->use_back()); 1210 Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI); 1211 Value *One = new LoadInst(OneIdx , LI->getName()+".1", LI); 1212 Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI); 1213 LI->replaceAllUsesWith(R); 1214 LI->eraseFromParent(); 1215 } 1216 GEPI->eraseFromParent(); 1217} 1218 1219 1220/// CleanupAllocaUsers - If SROA reported that it can promote the specified 1221/// allocation, but only if cleaned up, perform the cleanups required. 1222void SROA::CleanupAllocaUsers(AllocationInst *AI) { 1223 // At this point, we know that the end result will be SROA'd and promoted, so 1224 // we can insert ugly code if required so long as sroa+mem2reg will clean it 1225 // up. 
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    User *U = *UI++;
    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U))
      CleanupGEP(GEPI);
    else if (Instruction *I = dyn_cast<Instruction>(U)) {
      SmallVector<DbgInfoIntrinsic *, 2> DbgInUses;
      if (OnlyUsedByDbgInfoIntrinsics(I, &DbgInUses)) {
        // Safe to remove debug info uses.
        while (!DbgInUses.empty()) {
          DbgInfoIntrinsic *DI = DbgInUses.back(); DbgInUses.pop_back();
          DI->eraseFromParent();
        }
        I->eraseFromParent();
      }
    }
  }
}

/// MergeInType - Add the 'In' type to the accumulated type (VecTy) so far at
/// the offset specified by Offset (which is specified in bytes).
///
/// There are two cases we handle here:
///   1) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///      This promotes a <4 x float> with a store of float to the third element
///      into a <4 x float> that uses insert element.
///   2) A fully general blob of memory, which we turn into some (potentially
///      large) integer type with extract and insert operations where the loads
///      and stores would mutate the memory.
static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
                        unsigned AllocaSize, const TargetData &TD) {
  // If this could be contributing to a vector, analyze it.
  if (VecTy != Type::VoidTy) { // either null or a vector type.

    // If the In type is a vector that is the same size as the alloca, see if
    // it matches the existing VecTy.
    if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
      if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
        // If we're storing/loading a vector of the right size, allow it as a
        // vector.  If this is the first vector we see, remember the type so
        // that we know the element size.
        if (VecTy == 0)
          VecTy = VInTy;
        return;
      }
    } else if (In == Type::FloatTy || In == Type::DoubleTy ||
               (isa<IntegerType>(In) && In->getPrimitiveSizeInBits() >= 8 &&
                isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
      // If we're accessing something that could be an element of a vector,
      // see if the implied vector agrees with what we already have and if
      // Offset is compatible with it.
      unsigned EltSize = In->getPrimitiveSizeInBits()/8;
      if (Offset % EltSize == 0 &&
          AllocaSize % EltSize == 0 &&
          (VecTy == 0 ||
           cast<VectorType>(VecTy)->getElementType()
                 ->getPrimitiveSizeInBits()/8 == EltSize)) {
        if (VecTy == 0)
          VecTy = VectorType::get(In, AllocaSize/EltSize);
        return;
      }
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized vector
  // form.  We can still turn this into a large integer.
  VecTy = Type::VoidTy;
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee and
/// all its accesses to use a single vector type, return true, and set VecTy
/// to the new type.  If we could convert the alloca into a single promotable
/// integer, return true but set VecTy to VoidTy.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.
/// Offset is the current offset from the base of the alloca being analyzed.
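/// For example (illustrative), a union accessed both as <4 x float> and as
/// individual floats can be converted to a single <4 x float> alloca.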
1302/// 1303/// If we see at least one access to the value that is as a vector type, set the 1304/// SawVec flag. 1305/// 1306bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy, 1307 bool &SawVec, uint64_t Offset, 1308 unsigned AllocaSize) { 1309 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) { 1310 Instruction *User = cast<Instruction>(*UI); 1311 1312 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 1313 // Don't break volatile loads. 1314 if (LI->isVolatile()) 1315 return false; 1316 MergeInType(LI->getType(), Offset, VecTy, AllocaSize, *TD); 1317 SawVec |= isa<VectorType>(LI->getType()); 1318 continue; 1319 } 1320 1321 if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 1322 // Storing the pointer, not into the value? 1323 if (SI->getOperand(0) == V || SI->isVolatile()) return 0; 1324 MergeInType(SI->getOperand(0)->getType(), Offset, VecTy, AllocaSize, *TD); 1325 SawVec |= isa<VectorType>(SI->getOperand(0)->getType()); 1326 continue; 1327 } 1328 1329 if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) { 1330 if (!CanConvertToScalar(BCI, IsNotTrivial, VecTy, SawVec, Offset, 1331 AllocaSize)) 1332 return false; 1333 IsNotTrivial = true; 1334 continue; 1335 } 1336 1337 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { 1338 // If this is a GEP with a variable indices, we can't handle it. 1339 if (!GEP->hasAllConstantIndices()) 1340 return false; 1341 1342 // Compute the offset that this GEP adds to the pointer. 1343 SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end()); 1344 uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(), 1345 &Indices[0], Indices.size()); 1346 // See if all uses can be converted. 1347 if (!CanConvertToScalar(GEP, IsNotTrivial, VecTy, SawVec,Offset+GEPOffset, 1348 AllocaSize)) 1349 return false; 1350 IsNotTrivial = true; 1351 continue; 1352 } 1353 1354 // If this is a constant sized memset of a constant value (e.g. 0) we can 1355 // handle it. 1356 if (isa<MemSetInst>(User) && 1357 // Store of constant value. 1358 isa<ConstantInt>(User->getOperand(2)) && 1359 // Store with constant size. 1360 isa<ConstantInt>(User->getOperand(3))) { 1361 VecTy = Type::VoidTy; 1362 IsNotTrivial = true; 1363 continue; 1364 } 1365 1366 // Otherwise, we cannot handle this! 1367 return false; 1368 } 1369 1370 return true; 1371} 1372 1373 1374/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca 1375/// directly. This happens when we are converting an "integer union" to a 1376/// single integer scalar, or when we are converting a "vector union" to a 1377/// vector with insert/extractelement instructions. 1378/// 1379/// Offset is an offset from the original alloca, in bits that need to be 1380/// shifted to the right. By the end of this, there should be no uses of Ptr. 1381void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) { 1382 while (!Ptr->use_empty()) { 1383 Instruction *User = cast<Instruction>(Ptr->use_back()); 1384 1385 if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) { 1386 ConvertUsesToScalar(CI, NewAI, Offset); 1387 CI->eraseFromParent(); 1388 continue; 1389 } 1390 1391 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { 1392 // Compute the offset that this GEP adds to the pointer. 
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
      GEP->eraseFromParent();
      continue;
    }

    IRBuilder<> Builder(User->getParent(), User);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp");
      Value *NewLoadVal
        = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder);
      LI->replaceAllUsesWith(NewLoadVal);
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").c_str());
      Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
                                             Builder);
      Builder.CreateStore(New, NewAI);
      SI->eraseFromParent();
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // transform it into a store of the expanded constant value.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
      unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

      // Compute the value replicated the right number of times.
      APInt APVal(NumBytes*8, Val);

      // Splat the value if non-zero.
      if (Val)
        for (unsigned i = 1; i != NumBytes; ++i)
          APVal |= APVal << 8;

      Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").c_str());
      Value *New = ConvertScalar_InsertValue(ConstantInt::get(APVal), Old,
                                             Offset, Builder);
      Builder.CreateStore(New, NewAI);
      MSI->eraseFromParent();
      continue;
    }


    assert(0 && "Unsupported operation!");
    abort();
  }
}

/// ConvertScalar_ExtractValue - Extract a value of type ToType from an
/// integer or vector value FromVal, extracting the bits from the offset
/// specified by Offset.  This returns the value, which is of type ToType.
///
/// This happens when we are converting an "integer union" to a single
/// integer scalar, or when we are converting a "vector union" to a vector
/// with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
                                        uint64_t Offset, IRBuilder<> &Builder) {
  // If the load is of the whole new alloca, no conversion is needed.
  if (FromVal->getType() == ToType && Offset == 0)
    return FromVal;

  // If the result alloca is a vector type, this is either an element
  // access or a bitcast to another vector type of the same size.
  if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) {
    if (isa<VectorType>(ToType))
      return Builder.CreateBitCast(FromVal, ToType, "tmp");

    // Otherwise it must be an element access.
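    // e.g. extracting the float at bit offset 64 of a <4 x float> becomes
    // "extractelement <4 x float> %V, i32 2".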
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = TD->getTypePaddedSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
    }
    // Return the element extracted out of it.
    Value *V = Builder.CreateExtractElement(FromVal,
                                          ConstantInt::get(Type::Int32Ty, Elt),
                                            "tmp");
    if (V->getType() != ToType)
      V = Builder.CreateBitCast(V, ToType, "tmp");
    return V;
  }

  // If ToType is a first class aggregate, extract out each of the pieces and
  // use insertvalue instructions to form the FCA.
  if (const StructType *ST = dyn_cast<StructType>(ToType)) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    Value *Res = UndefValue::get(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
                                       Offset+Layout.getElementOffsetInBits(i),
                                              Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
    uint64_t EltSize = TD->getTypePaddedSizeInBits(AT->getElementType());
    Value *Res = UndefValue::get(AT);
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
                                              Offset+i*EltSize, Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }

  // Otherwise, this must be a union that was converted to an integer value.
  const IntegerType *NTy = cast<IntegerType>(FromVal->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = TD->getTypeStoreSizeInBits(NTy) -
            TD->getTypeStoreSizeInBits(ToType) - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative shift amounts (with shl), which the shift
  // instructions themselves do not define.  We do this to support (e.g.)
  // loads off the end of a structure where only some bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateLShr(FromVal, ConstantInt::get(FromVal->getType(),
                                                           ShAmt), "tmp");
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateShl(FromVal, ConstantInt::get(FromVal->getType(),
                                                          -ShAmt), "tmp");

  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = TD->getTypeSizeInBits(ToType);
  if (LIBitWidth < NTy->getBitWidth())
    FromVal = Builder.CreateTrunc(FromVal, IntegerType::get(LIBitWidth), "tmp");
  else if (LIBitWidth > NTy->getBitWidth())
    FromVal = Builder.CreateZExt(FromVal, IntegerType::get(LIBitWidth), "tmp");

  // If the result is an integer, this is a trunc or bitcast.
  if (isa<IntegerType>(ToType)) {
    // Should be done.
  } else if (ToType->isFloatingPoint() || isa<VectorType>(ToType)) {
    // Just do a bitcast, we know the sizes match up.
    FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
  } else {
    // Otherwise must be a pointer.
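    // For example (illustrative), an i8* loaded out of an integer union is
    // rebuilt here as "inttoptr i32 %tmp to i8*" on a 32-bit target.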
    FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp");
  }
  assert(FromVal->getType() == ToType && "Didn't convert right?");
  return FromVal;
}


/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
/// or vector value "Old" at the offset specified by Offset.
///
/// This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
                                       uint64_t Offset, IRBuilder<> &Builder) {

  // Convert the stored type to the actual type, shift it left to insert, then
  // 'or' it into place.
  const Type *AllocaType = Old->getType();

  if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    // If the result alloca is a vector type, this is either an element
    // access or a bitcast to another vector type.
    if (isa<VectorType>(SV->getType())) {
      SV = Builder.CreateBitCast(SV, AllocaType, "tmp");
    } else {
      // Must be an element insertion.
      unsigned Elt = Offset/TD->getTypePaddedSizeInBits(VTy->getElementType());

      if (SV->getType() != VTy->getElementType())
        SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");

      SV = Builder.CreateInsertElement(Old, SV,
                                       ConstantInt::get(Type::Int32Ty, Elt),
                                       "tmp");
    }
    return SV;
  }

  // If SV is a first-class aggregate value, insert each value recursively.
  if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old,
                                      Offset+Layout.getElementOffsetInBits(i),
                                      Builder);
    }
    return Old;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
    uint64_t EltSize = TD->getTypePaddedSizeInBits(AT->getElementType());
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
    }
    return Old;
  }

  // If SV is a float, convert it to the appropriate integer type.
  // If it is a pointer, do the same.
  unsigned SrcWidth = TD->getTypeSizeInBits(SV->getType());
  unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
  if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
    SV = Builder.CreateBitCast(SV, IntegerType::get(SrcWidth), "tmp");
  else if (isa<PointerType>(SV->getType()))
    SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(), "tmp");

  // Zero extend or truncate the value if needed.
  if (SV->getType() != AllocaType) {
    if (SV->getType()->getPrimitiveSizeInBits() <
        AllocaType->getPrimitiveSizeInBits())
      SV = Builder.CreateZExt(SV, AllocaType, "tmp");
    else {
      // Truncation may be needed if storing more than the alloca can hold
      // (undefined behavior).
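      // For example (illustrative), storing an i64 into an alloca that was
      // converted to an i32 scalar keeps only the low 32 bits of the value.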
      SV = Builder.CreateTrunc(SV, AllocaType, "tmp");
      SrcWidth = DestWidth;
      SrcStoreWidth = DestStoreWidth;
    }
  }

  // If this is a big-endian system and the store is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative shift amounts (with shr), which the shift
  // instructions themselves do not define.  We do this to support (e.g.)
  // stores off the end of a structure where only some bits in the structure
  // are set.
  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(), ShAmt), "tmp");
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(), -ShAmt), "tmp");
    Mask = Mask.lshr(-ShAmt);
  }

  // Mask out the bits we are about to insert from the old value, and or
  // in the new bits.
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = Builder.CreateAnd(Old, ConstantInt::get(~Mask), "mask");
    SV = Builder.CreateOr(Old, SV, "ins");
  }
  return SV;
}


/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, and return false if
/// we see any stores or other unknown uses.  If we see pointer arithmetic,
/// keep track of whether it moves the pointer (with isOffset), but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an un-offset
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
      // Ignore non-volatile loads, they are always ok.
      if (!LI->isVolatile())
        continue;

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If it
      // doesn't, it does.
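      // For example (illustrative), "getelementptr { i32, i32 }* %A, i32 0,
      // i32 0" leaves isOffset unchanged, while "getelementptr { i32, i32 }*
      // %A, i32 0, i32 1" forces it to true for the recursive walk.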
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                        isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI))
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the copy instruction if the
/// specified alloca is only modified by a single copy from a constant global,
/// or null otherwise.  If we can prove this, we can replace any uses of the
/// alloca with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}