ScalarReplAggregates.cpp revision 59136251f348a02a26f7a711a0e7fc459a727093
1//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This transformation implements the well known scalar replacement of 11// aggregates transformation. This xform breaks up alloca instructions of 12// aggregate type (structure or array) into individual alloca instructions for 13// each member (if possible). Then, if possible, it transforms the individual 14// alloca instructions into nice clean scalar SSA form. 15// 16// This combines a simple SRoA algorithm with the Mem2Reg algorithm because 17// often interact, especially for C++ programs. As such, iterating between 18// SRoA, then Mem2Reg until we run out of things to promote works well. 19// 20//===----------------------------------------------------------------------===// 21 22#define DEBUG_TYPE "scalarrepl" 23#include "llvm/Transforms/Scalar.h" 24#include "llvm/Constants.h" 25#include "llvm/DerivedTypes.h" 26#include "llvm/Function.h" 27#include "llvm/GlobalVariable.h" 28#include "llvm/Instructions.h" 29#include "llvm/IntrinsicInst.h" 30#include "llvm/LLVMContext.h" 31#include "llvm/Pass.h" 32#include "llvm/Analysis/Dominators.h" 33#include "llvm/Target/TargetData.h" 34#include "llvm/Transforms/Utils/PromoteMemToReg.h" 35#include "llvm/Transforms/Utils/Local.h" 36#include "llvm/Support/Debug.h" 37#include "llvm/Support/ErrorHandling.h" 38#include "llvm/Support/GetElementPtrTypeIterator.h" 39#include "llvm/Support/IRBuilder.h" 40#include "llvm/Support/MathExtras.h" 41#include "llvm/Support/raw_ostream.h" 42#include "llvm/ADT/SmallVector.h" 43#include "llvm/ADT/Statistic.h" 44using namespace llvm; 45 46STATISTIC(NumReplaced, "Number of allocas broken up"); 47STATISTIC(NumPromoted, "Number of allocas promoted"); 48STATISTIC(NumConverted, "Number of aggregates converted to scalar"); 49STATISTIC(NumGlobals, "Number of allocas copied from constant global"); 50 51namespace { 52 struct SROA : public FunctionPass { 53 static char ID; // Pass identification, replacement for typeid 54 explicit SROA(signed T = -1) : FunctionPass(&ID) { 55 if (T == -1) 56 SRThreshold = 128; 57 else 58 SRThreshold = T; 59 } 60 61 bool runOnFunction(Function &F); 62 63 bool performScalarRepl(Function &F); 64 bool performPromotion(Function &F); 65 66 // getAnalysisUsage - This pass does not require any passes, but we know it 67 // will not alter the CFG, so say so. 68 virtual void getAnalysisUsage(AnalysisUsage &AU) const { 69 AU.addRequired<DominatorTree>(); 70 AU.addRequired<DominanceFrontier>(); 71 AU.setPreservesCFG(); 72 } 73 74 private: 75 TargetData *TD; 76 77 /// AllocaInfo - When analyzing uses of an alloca instruction, this captures 78 /// information about the uses. All these fields are initialized to false 79 /// and set to true when something is learned. 80 struct AllocaInfo { 81 /// isUnsafe - This is set to true if the alloca cannot be SROA'd. 82 bool isUnsafe : 1; 83 84 /// needsCleanup - This is set to true if there is some use of the alloca 85 /// that requires cleanup. 86 bool needsCleanup : 1; 87 88 /// isMemCpySrc - This is true if this aggregate is memcpy'd from. 89 bool isMemCpySrc : 1; 90 91 /// isMemCpyDst - This is true if this aggregate is memcpy'd into. 92 bool isMemCpyDst : 1; 93 94 AllocaInfo() 95 : isUnsafe(false), needsCleanup(false), 96 isMemCpySrc(false), isMemCpyDst(false) {} 97 }; 98 99 unsigned SRThreshold; 100 101 void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; } 102 103 int isSafeAllocaToScalarRepl(AllocationInst *AI); 104 105 void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI, 106 AllocaInfo &Info); 107 void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI, 108 AllocaInfo &Info); 109 void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI, 110 unsigned OpNo, AllocaInfo &Info); 111 void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI, 112 AllocaInfo &Info); 113 114 void DoScalarReplacement(AllocationInst *AI, 115 std::vector<AllocationInst*> &WorkList); 116 void CleanupGEP(GetElementPtrInst *GEP); 117 void CleanupAllocaUsers(AllocationInst *AI); 118 AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base); 119 120 void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI, 121 SmallVector<AllocaInst*, 32> &NewElts); 122 123 void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst, 124 AllocationInst *AI, 125 SmallVector<AllocaInst*, 32> &NewElts); 126 void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI, 127 SmallVector<AllocaInst*, 32> &NewElts); 128 void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI, 129 SmallVector<AllocaInst*, 32> &NewElts); 130 131 bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy, 132 bool &SawVec, uint64_t Offset, unsigned AllocaSize); 133 void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset); 134 Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType, 135 uint64_t Offset, IRBuilder<> &Builder); 136 Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal, 137 uint64_t Offset, IRBuilder<> &Builder); 138 static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI); 139 }; 140} 141 142char SROA::ID = 0; 143static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates"); 144 145// Public interface to the ScalarReplAggregates pass 146FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) { 147 return new SROA(Threshold); 148} 149 150 151bool SROA::runOnFunction(Function &F) { 152 TD = getAnalysisIfAvailable<TargetData>(); 153 154 bool Changed = performPromotion(F); 155 156 // FIXME: ScalarRepl currently depends on TargetData more than it 157 // theoretically needs to. It should be refactored in order to support 158 // target-independent IR. Until this is done, just skip the actual 159 // scalar-replacement portion of this pass. 160 if (!TD) return Changed; 161 162 while (1) { 163 bool LocalChange = performScalarRepl(F); 164 if (!LocalChange) break; // No need to repromote if no scalarrepl 165 Changed = true; 166 LocalChange = performPromotion(F); 167 if (!LocalChange) break; // No need to re-scalarrepl if no promotion 168 } 169 170 return Changed; 171} 172 173 174bool SROA::performPromotion(Function &F) { 175 std::vector<AllocaInst*> Allocas; 176 DominatorTree &DT = getAnalysis<DominatorTree>(); 177 DominanceFrontier &DF = getAnalysis<DominanceFrontier>(); 178 179 BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function 180 181 bool Changed = false; 182 183 while (1) { 184 Allocas.clear(); 185 186 // Find allocas that are safe to promote, by looking at all instructions in 187 // the entry node 188 for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I) 189 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca? 190 if (isAllocaPromotable(AI)) 191 Allocas.push_back(AI); 192 193 if (Allocas.empty()) break; 194 195 PromoteMemToReg(Allocas, DT, DF, F.getContext()); 196 NumPromoted += Allocas.size(); 197 Changed = true; 198 } 199 200 return Changed; 201} 202 203/// getNumSAElements - Return the number of elements in the specific struct or 204/// array. 205static uint64_t getNumSAElements(const Type *T) { 206 if (const StructType *ST = dyn_cast<StructType>(T)) 207 return ST->getNumElements(); 208 return cast<ArrayType>(T)->getNumElements(); 209} 210 211// performScalarRepl - This algorithm is a simple worklist driven algorithm, 212// which runs on all of the malloc/alloca instructions in the function, removing 213// them if they are only used by getelementptr instructions. 214// 215bool SROA::performScalarRepl(Function &F) { 216 std::vector<AllocationInst*> WorkList; 217 218 // Scan the entry basic block, adding any alloca's and mallocs to the worklist 219 BasicBlock &BB = F.getEntryBlock(); 220 for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) 221 if (AllocationInst *A = dyn_cast<AllocationInst>(I)) 222 WorkList.push_back(A); 223 224 // Process the worklist 225 bool Changed = false; 226 while (!WorkList.empty()) { 227 AllocationInst *AI = WorkList.back(); 228 WorkList.pop_back(); 229 230 // Handle dead allocas trivially. These can be formed by SROA'ing arrays 231 // with unused elements. 232 if (AI->use_empty()) { 233 AI->eraseFromParent(); 234 continue; 235 } 236 237 // If this alloca is impossible for us to promote, reject it early. 238 if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized()) 239 continue; 240 241 // Check to see if this allocation is only modified by a memcpy/memmove from 242 // a constant global. If this is the case, we can change all users to use 243 // the constant global instead. This is commonly produced by the CFE by 244 // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A' 245 // is only subsequently read. 246 if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) { 247 DEBUG(errs() << "Found alloca equal to global: " << *AI << '\n'); 248 DEBUG(errs() << " memcpy = " << *TheCopy << '\n'); 249 Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2)); 250 AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType())); 251 TheCopy->eraseFromParent(); // Don't mutate the global. 252 AI->eraseFromParent(); 253 ++NumGlobals; 254 Changed = true; 255 continue; 256 } 257 258 // Check to see if we can perform the core SROA transformation. We cannot 259 // transform the allocation instruction if it is an array allocation 260 // (allocations OF arrays are ok though), and an allocation of a scalar 261 // value cannot be decomposed at all. 262 uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType()); 263 264 // Do not promote [0 x %struct]. 265 if (AllocaSize == 0) continue; 266 267 // Do not promote any struct whose size is too big. 268 if (AllocaSize > SRThreshold) continue; 269 270 if ((isa<StructType>(AI->getAllocatedType()) || 271 isa<ArrayType>(AI->getAllocatedType())) && 272 // Do not promote any struct into more than "32" separate vars. 273 getNumSAElements(AI->getAllocatedType()) <= SRThreshold/4) { 274 // Check that all of the users of the allocation are capable of being 275 // transformed. 276 switch (isSafeAllocaToScalarRepl(AI)) { 277 default: llvm_unreachable("Unexpected value!"); 278 case 0: // Not safe to scalar replace. 279 break; 280 case 1: // Safe, but requires cleanup/canonicalizations first 281 CleanupAllocaUsers(AI); 282 // FALL THROUGH. 283 case 3: // Safe to scalar replace. 284 DoScalarReplacement(AI, WorkList); 285 Changed = true; 286 continue; 287 } 288 } 289 290 // If we can turn this aggregate value (potentially with casts) into a 291 // simple scalar value that can be mem2reg'd into a register value. 292 // IsNotTrivial tracks whether this is something that mem2reg could have 293 // promoted itself. If so, we don't want to transform it needlessly. Note 294 // that we can't just check based on the type: the alloca may be of an i32 295 // but that has pointer arithmetic to set byte 3 of it or something. 296 bool IsNotTrivial = false; 297 const Type *VectorTy = 0; 298 bool HadAVector = false; 299 if (CanConvertToScalar(AI, IsNotTrivial, VectorTy, HadAVector, 300 0, unsigned(AllocaSize)) && IsNotTrivial) { 301 AllocaInst *NewAI; 302 // If we were able to find a vector type that can handle this with 303 // insert/extract elements, and if there was at least one use that had 304 // a vector type, promote this to a vector. We don't want to promote 305 // random stuff that doesn't use vectors (e.g. <9 x double>) because then 306 // we just get a lot of insert/extracts. If at least one vector is 307 // involved, then we probably really do have a union of vector/array. 308 if (VectorTy && isa<VectorType>(VectorTy) && HadAVector) { 309 DEBUG(errs() << "CONVERT TO VECTOR: " << *AI << "\n TYPE = " 310 << *VectorTy << '\n'); 311 312 // Create and insert the vector alloca. 313 NewAI = new AllocaInst(VectorTy, 0, "", AI->getParent()->begin()); 314 ConvertUsesToScalar(AI, NewAI, 0); 315 } else { 316 DEBUG(errs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n"); 317 318 // Create and insert the integer alloca. 319 const Type *NewTy = IntegerType::get(AI->getContext(), AllocaSize*8); 320 NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin()); 321 ConvertUsesToScalar(AI, NewAI, 0); 322 } 323 NewAI->takeName(AI); 324 AI->eraseFromParent(); 325 ++NumConverted; 326 Changed = true; 327 continue; 328 } 329 330 // Otherwise, couldn't process this alloca. 331 } 332 333 return Changed; 334} 335 336/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl 337/// predicate, do SROA now. 338void SROA::DoScalarReplacement(AllocationInst *AI, 339 std::vector<AllocationInst*> &WorkList) { 340 DEBUG(errs() << "Found inst to SROA: " << *AI << '\n'); 341 SmallVector<AllocaInst*, 32> ElementAllocas; 342 if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) { 343 ElementAllocas.reserve(ST->getNumContainedTypes()); 344 for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) { 345 AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0, 346 AI->getAlignment(), 347 AI->getName() + "." + Twine(i), AI); 348 ElementAllocas.push_back(NA); 349 WorkList.push_back(NA); // Add to worklist for recursive processing 350 } 351 } else { 352 const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType()); 353 ElementAllocas.reserve(AT->getNumElements()); 354 const Type *ElTy = AT->getElementType(); 355 for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { 356 AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(), 357 AI->getName() + "." + Twine(i), AI); 358 ElementAllocas.push_back(NA); 359 WorkList.push_back(NA); // Add to worklist for recursive processing 360 } 361 } 362 363 // Now that we have created the alloca instructions that we want to use, 364 // expand the getelementptr instructions to use them. 365 // 366 while (!AI->use_empty()) { 367 Instruction *User = cast<Instruction>(AI->use_back()); 368 if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) { 369 RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas); 370 BCInst->eraseFromParent(); 371 continue; 372 } 373 374 // Replace: 375 // %res = load { i32, i32 }* %alloc 376 // with: 377 // %load.0 = load i32* %alloc.0 378 // %insert.0 insertvalue { i32, i32 } zeroinitializer, i32 %load.0, 0 379 // %load.1 = load i32* %alloc.1 380 // %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1 381 // (Also works for arrays instead of structs) 382 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 383 Value *Insert = UndefValue::get(LI->getType()); 384 for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) { 385 Value *Load = new LoadInst(ElementAllocas[i], "load", LI); 386 Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI); 387 } 388 LI->replaceAllUsesWith(Insert); 389 LI->eraseFromParent(); 390 continue; 391 } 392 393 // Replace: 394 // store { i32, i32 } %val, { i32, i32 }* %alloc 395 // with: 396 // %val.0 = extractvalue { i32, i32 } %val, 0 397 // store i32 %val.0, i32* %alloc.0 398 // %val.1 = extractvalue { i32, i32 } %val, 1 399 // store i32 %val.1, i32* %alloc.1 400 // (Also works for arrays instead of structs) 401 if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 402 Value *Val = SI->getOperand(0); 403 for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) { 404 Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI); 405 new StoreInst(Extract, ElementAllocas[i], SI); 406 } 407 SI->eraseFromParent(); 408 continue; 409 } 410 411 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User); 412 // We now know that the GEP is of the form: GEP <ptr>, 0, <cst> 413 unsigned Idx = 414 (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue(); 415 416 assert(Idx < ElementAllocas.size() && "Index out of range?"); 417 AllocaInst *AllocaToUse = ElementAllocas[Idx]; 418 419 Value *RepValue; 420 if (GEPI->getNumOperands() == 3) { 421 // Do not insert a new getelementptr instruction with zero indices, only 422 // to have it optimized out later. 423 RepValue = AllocaToUse; 424 } else { 425 // We are indexing deeply into the structure, so we still need a 426 // getelement ptr instruction to finish the indexing. This may be 427 // expanded itself once the worklist is rerun. 428 // 429 SmallVector<Value*, 8> NewArgs; 430 NewArgs.push_back(Constant::getNullValue( 431 Type::getInt32Ty(AI->getContext()))); 432 NewArgs.append(GEPI->op_begin()+3, GEPI->op_end()); 433 RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(), 434 NewArgs.end(), "", GEPI); 435 RepValue->takeName(GEPI); 436 } 437 438 // If this GEP is to the start of the aggregate, check for memcpys. 439 if (Idx == 0 && GEPI->hasAllZeroIndices()) 440 RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas); 441 442 // Move all of the users over to the new GEP. 443 GEPI->replaceAllUsesWith(RepValue); 444 // Delete the old GEP 445 GEPI->eraseFromParent(); 446 } 447 448 // Finally, delete the Alloca instruction 449 AI->eraseFromParent(); 450 NumReplaced++; 451} 452 453 454/// isSafeElementUse - Check to see if this use is an allowed use for a 455/// getelementptr instruction of an array aggregate allocation. isFirstElt 456/// indicates whether Ptr is known to the start of the aggregate. 457/// 458void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI, 459 AllocaInfo &Info) { 460 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 461 I != E; ++I) { 462 Instruction *User = cast<Instruction>(*I); 463 switch (User->getOpcode()) { 464 case Instruction::Load: break; 465 case Instruction::Store: 466 // Store is ok if storing INTO the pointer, not storing the pointer 467 if (User->getOperand(0) == Ptr) return MarkUnsafe(Info); 468 break; 469 case Instruction::GetElementPtr: { 470 GetElementPtrInst *GEP = cast<GetElementPtrInst>(User); 471 bool AreAllZeroIndices = isFirstElt; 472 if (GEP->getNumOperands() > 1) { 473 if (!isa<ConstantInt>(GEP->getOperand(1)) || 474 !cast<ConstantInt>(GEP->getOperand(1))->isZero()) 475 // Using pointer arithmetic to navigate the array. 476 return MarkUnsafe(Info); 477 478 if (AreAllZeroIndices) 479 AreAllZeroIndices = GEP->hasAllZeroIndices(); 480 } 481 isSafeElementUse(GEP, AreAllZeroIndices, AI, Info); 482 if (Info.isUnsafe) return; 483 break; 484 } 485 case Instruction::BitCast: 486 if (isFirstElt) { 487 isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info); 488 if (Info.isUnsafe) return; 489 break; 490 } 491 DEBUG(errs() << " Transformation preventing inst: " << *User << '\n'); 492 return MarkUnsafe(Info); 493 case Instruction::Call: 494 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { 495 if (isFirstElt) { 496 isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info); 497 if (Info.isUnsafe) return; 498 break; 499 } 500 } 501 DEBUG(errs() << " Transformation preventing inst: " << *User << '\n'); 502 return MarkUnsafe(Info); 503 default: 504 DEBUG(errs() << " Transformation preventing inst: " << *User << '\n'); 505 return MarkUnsafe(Info); 506 } 507 } 508 return; // All users look ok :) 509} 510 511/// AllUsersAreLoads - Return true if all users of this value are loads. 512static bool AllUsersAreLoads(Value *Ptr) { 513 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 514 I != E; ++I) 515 if (cast<Instruction>(*I)->getOpcode() != Instruction::Load) 516 return false; 517 return true; 518} 519 520/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an 521/// aggregate allocation. 522/// 523void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI, 524 AllocaInfo &Info) { 525 if (BitCastInst *C = dyn_cast<BitCastInst>(User)) 526 return isSafeUseOfBitCastedAllocation(C, AI, Info); 527 528 if (LoadInst *LI = dyn_cast<LoadInst>(User)) 529 if (!LI->isVolatile()) 530 return;// Loads (returning a first class aggregrate) are always rewritable 531 532 if (StoreInst *SI = dyn_cast<StoreInst>(User)) 533 if (!SI->isVolatile() && SI->getOperand(0) != AI) 534 return;// Store is ok if storing INTO the pointer, not storing the pointer 535 536 GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User); 537 if (GEPI == 0) 538 return MarkUnsafe(Info); 539 540 gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI); 541 542 // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>". 543 if (I == E || 544 I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) { 545 return MarkUnsafe(Info); 546 } 547 548 ++I; 549 if (I == E) return MarkUnsafe(Info); // ran out of GEP indices?? 550 551 bool IsAllZeroIndices = true; 552 553 // If the first index is a non-constant index into an array, see if we can 554 // handle it as a special case. 555 if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) { 556 if (!isa<ConstantInt>(I.getOperand())) { 557 IsAllZeroIndices = 0; 558 uint64_t NumElements = AT->getNumElements(); 559 560 // If this is an array index and the index is not constant, we cannot 561 // promote... that is unless the array has exactly one or two elements in 562 // it, in which case we CAN promote it, but we have to canonicalize this 563 // out if this is the only problem. 564 if ((NumElements == 1 || NumElements == 2) && 565 AllUsersAreLoads(GEPI)) { 566 Info.needsCleanup = true; 567 return; // Canonicalization required! 568 } 569 return MarkUnsafe(Info); 570 } 571 } 572 573 // Walk through the GEP type indices, checking the types that this indexes 574 // into. 575 for (; I != E; ++I) { 576 // Ignore struct elements, no extra checking needed for these. 577 if (isa<StructType>(*I)) 578 continue; 579 580 ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand()); 581 if (!IdxVal) return MarkUnsafe(Info); 582 583 // Are all indices still zero? 584 IsAllZeroIndices &= IdxVal->isZero(); 585 586 if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) { 587 // This GEP indexes an array. Verify that this is an in-range constant 588 // integer. Specifically, consider A[0][i]. We cannot know that the user 589 // isn't doing invalid things like allowing i to index an out-of-range 590 // subscript that accesses A[1]. Because of this, we have to reject SROA 591 // of any accesses into structs where any of the components are variables. 592 if (IdxVal->getZExtValue() >= AT->getNumElements()) 593 return MarkUnsafe(Info); 594 } else if (const VectorType *VT = dyn_cast<VectorType>(*I)) { 595 if (IdxVal->getZExtValue() >= VT->getNumElements()) 596 return MarkUnsafe(Info); 597 } 598 } 599 600 // If there are any non-simple uses of this getelementptr, make sure to reject 601 // them. 602 return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info); 603} 604 605/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory 606/// intrinsic can be promoted by SROA. At this point, we know that the operand 607/// of the memintrinsic is a pointer to the beginning of the allocation. 608void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI, 609 unsigned OpNo, AllocaInfo &Info) { 610 // If not constant length, give up. 611 ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); 612 if (!Length) return MarkUnsafe(Info); 613 614 // If not the whole aggregate, give up. 615 if (Length->getZExtValue() != 616 TD->getTypeAllocSize(AI->getType()->getElementType())) 617 return MarkUnsafe(Info); 618 619 // We only know about memcpy/memset/memmove. 620 if (!isa<MemIntrinsic>(MI)) 621 return MarkUnsafe(Info); 622 623 // Otherwise, we can transform it. Determine whether this is a memcpy/set 624 // into or out of the aggregate. 625 if (OpNo == 1) 626 Info.isMemCpyDst = true; 627 else { 628 assert(OpNo == 2); 629 Info.isMemCpySrc = true; 630 } 631} 632 633/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast 634/// are 635void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI, 636 AllocaInfo &Info) { 637 for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end(); 638 UI != E; ++UI) { 639 if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) { 640 isSafeUseOfBitCastedAllocation(BCU, AI, Info); 641 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) { 642 isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info); 643 } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { 644 if (SI->isVolatile()) 645 return MarkUnsafe(Info); 646 647 // If storing the entire alloca in one chunk through a bitcasted pointer 648 // to integer, we can transform it. This happens (for example) when you 649 // cast a {i32,i32}* to i64* and store through it. This is similar to the 650 // memcpy case and occurs in various "byval" cases and emulated memcpys. 651 if (isa<IntegerType>(SI->getOperand(0)->getType()) && 652 TD->getTypeAllocSize(SI->getOperand(0)->getType()) == 653 TD->getTypeAllocSize(AI->getType()->getElementType())) { 654 Info.isMemCpyDst = true; 655 continue; 656 } 657 return MarkUnsafe(Info); 658 } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) { 659 if (LI->isVolatile()) 660 return MarkUnsafe(Info); 661 662 // If loading the entire alloca in one chunk through a bitcasted pointer 663 // to integer, we can transform it. This happens (for example) when you 664 // cast a {i32,i32}* to i64* and load through it. This is similar to the 665 // memcpy case and occurs in various "byval" cases and emulated memcpys. 666 if (isa<IntegerType>(LI->getType()) && 667 TD->getTypeAllocSize(LI->getType()) == 668 TD->getTypeAllocSize(AI->getType()->getElementType())) { 669 Info.isMemCpySrc = true; 670 continue; 671 } 672 return MarkUnsafe(Info); 673 } else if (isa<DbgInfoIntrinsic>(UI)) { 674 // If one user is DbgInfoIntrinsic then check if all users are 675 // DbgInfoIntrinsics. 676 if (OnlyUsedByDbgInfoIntrinsics(BC)) { 677 Info.needsCleanup = true; 678 return; 679 } 680 else 681 MarkUnsafe(Info); 682 } 683 else { 684 return MarkUnsafe(Info); 685 } 686 if (Info.isUnsafe) return; 687 } 688} 689 690/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes 691/// to its first element. Transform users of the cast to use the new values 692/// instead. 693void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI, 694 SmallVector<AllocaInst*, 32> &NewElts) { 695 Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end(); 696 while (UI != UE) { 697 Instruction *User = cast<Instruction>(*UI++); 698 if (BitCastInst *BCU = dyn_cast<BitCastInst>(User)) { 699 RewriteBitCastUserOfAlloca(BCU, AI, NewElts); 700 if (BCU->use_empty()) BCU->eraseFromParent(); 701 continue; 702 } 703 704 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { 705 // This must be memcpy/memmove/memset of the entire aggregate. 706 // Split into one per element. 707 RewriteMemIntrinUserOfAlloca(MI, BCInst, AI, NewElts); 708 continue; 709 } 710 711 if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 712 // If this is a store of the entire alloca from an integer, rewrite it. 713 RewriteStoreUserOfWholeAlloca(SI, AI, NewElts); 714 continue; 715 } 716 717 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 718 // If this is a load of the entire alloca to an integer, rewrite it. 719 RewriteLoadUserOfWholeAlloca(LI, AI, NewElts); 720 continue; 721 } 722 723 // Otherwise it must be some other user of a gep of the first pointer. Just 724 // leave these alone. 725 continue; 726 } 727} 728 729/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI. 730/// Rewrite it to copy or set the elements of the scalarized memory. 731void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst, 732 AllocationInst *AI, 733 SmallVector<AllocaInst*, 32> &NewElts) { 734 735 // If this is a memcpy/memmove, construct the other pointer as the 736 // appropriate type. The "Other" pointer is the pointer that goes to memory 737 // that doesn't have anything to do with the alloca that we are promoting. For 738 // memset, this Value* stays null. 739 Value *OtherPtr = 0; 740 LLVMContext &Context = MI->getContext(); 741 unsigned MemAlignment = MI->getAlignment(); 742 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcopy 743 if (BCInst == MTI->getRawDest()) 744 OtherPtr = MTI->getRawSource(); 745 else { 746 assert(BCInst == MTI->getRawSource()); 747 OtherPtr = MTI->getRawDest(); 748 } 749 } 750 751 // If there is an other pointer, we want to convert it to the same pointer 752 // type as AI has, so we can GEP through it safely. 753 if (OtherPtr) { 754 // It is likely that OtherPtr is a bitcast, if so, remove it. 755 if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr)) 756 OtherPtr = BC->getOperand(0); 757 // All zero GEPs are effectively bitcasts. 758 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr)) 759 if (GEP->hasAllZeroIndices()) 760 OtherPtr = GEP->getOperand(0); 761 762 if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr)) 763 if (BCE->getOpcode() == Instruction::BitCast) 764 OtherPtr = BCE->getOperand(0); 765 766 // If the pointer is not the right type, insert a bitcast to the right 767 // type. 768 if (OtherPtr->getType() != AI->getType()) 769 OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(), 770 MI); 771 } 772 773 // Process each element of the aggregate. 774 Value *TheFn = MI->getOperand(0); 775 const Type *BytePtrTy = MI->getRawDest()->getType(); 776 bool SROADest = MI->getRawDest() == BCInst; 777 778 Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext())); 779 780 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { 781 // If this is a memcpy/memmove, emit a GEP of the other element address. 782 Value *OtherElt = 0; 783 unsigned OtherEltAlign = MemAlignment; 784 785 if (OtherPtr) { 786 Value *Idx[2] = { Zero, 787 ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) }; 788 OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2, 789 OtherPtr->getNameStr()+"."+Twine(i), 790 MI); 791 uint64_t EltOffset; 792 const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType()); 793 if (const StructType *ST = 794 dyn_cast<StructType>(OtherPtrTy->getElementType())) { 795 EltOffset = TD->getStructLayout(ST)->getElementOffset(i); 796 } else { 797 const Type *EltTy = 798 cast<SequentialType>(OtherPtr->getType())->getElementType(); 799 EltOffset = TD->getTypeAllocSize(EltTy)*i; 800 } 801 802 // The alignment of the other pointer is the guaranteed alignment of the 803 // element, which is affected by both the known alignment of the whole 804 // mem intrinsic and the alignment of the element. If the alignment of 805 // the memcpy (f.e.) is 32 but the element is at a 4-byte offset, then the 806 // known alignment is just 4 bytes. 807 OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset); 808 } 809 810 Value *EltPtr = NewElts[i]; 811 const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType(); 812 813 // If we got down to a scalar, insert a load or store as appropriate. 814 if (EltTy->isSingleValueType()) { 815 if (isa<MemTransferInst>(MI)) { 816 if (SROADest) { 817 // From Other to Alloca. 818 Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI); 819 new StoreInst(Elt, EltPtr, MI); 820 } else { 821 // From Alloca to Other. 822 Value *Elt = new LoadInst(EltPtr, "tmp", MI); 823 new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI); 824 } 825 continue; 826 } 827 assert(isa<MemSetInst>(MI)); 828 829 // If the stored element is zero (common case), just store a null 830 // constant. 831 Constant *StoreVal; 832 if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) { 833 if (CI->isZero()) { 834 StoreVal = Constant::getNullValue(EltTy); // 0.0, null, 0, <0,0> 835 } else { 836 // If EltTy is a vector type, get the element type. 837 const Type *ValTy = EltTy->getScalarType(); 838 839 // Construct an integer with the right value. 840 unsigned EltSize = TD->getTypeSizeInBits(ValTy); 841 APInt OneVal(EltSize, CI->getZExtValue()); 842 APInt TotalVal(OneVal); 843 // Set each byte. 844 for (unsigned i = 0; 8*i < EltSize; ++i) { 845 TotalVal = TotalVal.shl(8); 846 TotalVal |= OneVal; 847 } 848 849 // Convert the integer value to the appropriate type. 850 StoreVal = ConstantInt::get(Context, TotalVal); 851 if (isa<PointerType>(ValTy)) 852 StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy); 853 else if (ValTy->isFloatingPoint()) 854 StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy); 855 assert(StoreVal->getType() == ValTy && "Type mismatch!"); 856 857 // If the requested value was a vector constant, create it. 858 if (EltTy != ValTy) { 859 unsigned NumElts = cast<VectorType>(ValTy)->getNumElements(); 860 SmallVector<Constant*, 16> Elts(NumElts, StoreVal); 861 StoreVal = ConstantVector::get(&Elts[0], NumElts); 862 } 863 } 864 new StoreInst(StoreVal, EltPtr, MI); 865 continue; 866 } 867 // Otherwise, if we're storing a byte variable, use a memset call for 868 // this element. 869 } 870 871 // Cast the element pointer to BytePtrTy. 872 if (EltPtr->getType() != BytePtrTy) 873 EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI); 874 875 // Cast the other pointer (if we have one) to BytePtrTy. 876 if (OtherElt && OtherElt->getType() != BytePtrTy) 877 OtherElt = new BitCastInst(OtherElt, BytePtrTy,OtherElt->getNameStr(), 878 MI); 879 880 unsigned EltSize = TD->getTypeAllocSize(EltTy); 881 882 // Finally, insert the meminst for this element. 883 if (isa<MemTransferInst>(MI)) { 884 Value *Ops[] = { 885 SROADest ? EltPtr : OtherElt, // Dest ptr 886 SROADest ? OtherElt : EltPtr, // Src ptr 887 ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size 888 // Align 889 ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign) 890 }; 891 CallInst::Create(TheFn, Ops, Ops + 4, "", MI); 892 } else { 893 assert(isa<MemSetInst>(MI)); 894 Value *Ops[] = { 895 EltPtr, MI->getOperand(2), // Dest, Value, 896 ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size 897 Zero // Align 898 }; 899 CallInst::Create(TheFn, Ops, Ops + 4, "", MI); 900 } 901 } 902 MI->eraseFromParent(); 903} 904 905/// RewriteStoreUserOfWholeAlloca - We found an store of an integer that 906/// overwrites the entire allocation. Extract out the pieces of the stored 907/// integer and store them individually. 908void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, 909 AllocationInst *AI, 910 SmallVector<AllocaInst*, 32> &NewElts){ 911 // Extract each element out of the integer according to its structure offset 912 // and store the element value to the individual alloca. 913 Value *SrcVal = SI->getOperand(0); 914 const Type *AllocaEltTy = AI->getType()->getElementType(); 915 uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy); 916 917 // If this isn't a store of an integer to the whole alloca, it may be a store 918 // to the first element. Just ignore the store in this case and normal SROA 919 // will handle it. 920 if (!isa<IntegerType>(SrcVal->getType()) || 921 TD->getTypeAllocSizeInBits(SrcVal->getType()) != AllocaSizeBits) 922 return; 923 // Handle tail padding by extending the operand 924 if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits) 925 SrcVal = new ZExtInst(SrcVal, 926 IntegerType::get(SI->getContext(), AllocaSizeBits), 927 "", SI); 928 929 DEBUG(errs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << '\n' << *SI 930 << '\n'); 931 932 // There are two forms here: AI could be an array or struct. Both cases 933 // have different ways to compute the element offset. 934 if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) { 935 const StructLayout *Layout = TD->getStructLayout(EltSTy); 936 937 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { 938 // Get the number of bits to shift SrcVal to get the value. 939 const Type *FieldTy = EltSTy->getElementType(i); 940 uint64_t Shift = Layout->getElementOffsetInBits(i); 941 942 if (TD->isBigEndian()) 943 Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy); 944 945 Value *EltVal = SrcVal; 946 if (Shift) { 947 Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift); 948 EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal, 949 "sroa.store.elt", SI); 950 } 951 952 // Truncate down to an integer of the right size. 953 uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy); 954 955 // Ignore zero sized fields like {}, they obviously contain no data. 956 if (FieldSizeBits == 0) continue; 957 958 if (FieldSizeBits != AllocaSizeBits) 959 EltVal = new TruncInst(EltVal, 960 IntegerType::get(SI->getContext(), FieldSizeBits), 961 "", SI); 962 Value *DestField = NewElts[i]; 963 if (EltVal->getType() == FieldTy) { 964 // Storing to an integer field of this size, just do it. 965 } else if (FieldTy->isFloatingPoint() || isa<VectorType>(FieldTy)) { 966 // Bitcast to the right element type (for fp/vector values). 967 EltVal = new BitCastInst(EltVal, FieldTy, "", SI); 968 } else { 969 // Otherwise, bitcast the dest pointer (for aggregates). 970 DestField = new BitCastInst(DestField, 971 PointerType::getUnqual(EltVal->getType()), 972 "", SI); 973 } 974 new StoreInst(EltVal, DestField, SI); 975 } 976 977 } else { 978 const ArrayType *ATy = cast<ArrayType>(AllocaEltTy); 979 const Type *ArrayEltTy = ATy->getElementType(); 980 uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy); 981 uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy); 982 983 uint64_t Shift; 984 985 if (TD->isBigEndian()) 986 Shift = AllocaSizeBits-ElementOffset; 987 else 988 Shift = 0; 989 990 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { 991 // Ignore zero sized fields like {}, they obviously contain no data. 992 if (ElementSizeBits == 0) continue; 993 994 Value *EltVal = SrcVal; 995 if (Shift) { 996 Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift); 997 EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal, 998 "sroa.store.elt", SI); 999 } 1000 1001 // Truncate down to an integer of the right size. 1002 if (ElementSizeBits != AllocaSizeBits) 1003 EltVal = new TruncInst(EltVal, 1004 IntegerType::get(SI->getContext(), 1005 ElementSizeBits),"",SI); 1006 Value *DestField = NewElts[i]; 1007 if (EltVal->getType() == ArrayEltTy) { 1008 // Storing to an integer field of this size, just do it. 1009 } else if (ArrayEltTy->isFloatingPoint() || isa<VectorType>(ArrayEltTy)) { 1010 // Bitcast to the right element type (for fp/vector values). 1011 EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI); 1012 } else { 1013 // Otherwise, bitcast the dest pointer (for aggregates). 1014 DestField = new BitCastInst(DestField, 1015 PointerType::getUnqual(EltVal->getType()), 1016 "", SI); 1017 } 1018 new StoreInst(EltVal, DestField, SI); 1019 1020 if (TD->isBigEndian()) 1021 Shift -= ElementOffset; 1022 else 1023 Shift += ElementOffset; 1024 } 1025 } 1026 1027 SI->eraseFromParent(); 1028} 1029 1030/// RewriteLoadUserOfWholeAlloca - We found an load of the entire allocation to 1031/// an integer. Load the individual pieces to form the aggregate value. 1032void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI, 1033 SmallVector<AllocaInst*, 32> &NewElts) { 1034 // Extract each element out of the NewElts according to its structure offset 1035 // and form the result value. 1036 const Type *AllocaEltTy = AI->getType()->getElementType(); 1037 uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy); 1038 1039 // If this isn't a load of the whole alloca to an integer, it may be a load 1040 // of the first element. Just ignore the load in this case and normal SROA 1041 // will handle it. 1042 if (!isa<IntegerType>(LI->getType()) || 1043 TD->getTypeAllocSizeInBits(LI->getType()) != AllocaSizeBits) 1044 return; 1045 1046 DEBUG(errs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI 1047 << '\n'); 1048 1049 // There are two forms here: AI could be an array or struct. Both cases 1050 // have different ways to compute the element offset. 1051 const StructLayout *Layout = 0; 1052 uint64_t ArrayEltBitOffset = 0; 1053 if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) { 1054 Layout = TD->getStructLayout(EltSTy); 1055 } else { 1056 const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType(); 1057 ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy); 1058 } 1059 1060 Value *ResultVal = 1061 Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits)); 1062 1063 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { 1064 // Load the value from the alloca. If the NewElt is an aggregate, cast 1065 // the pointer to an integer of the same size before doing the load. 1066 Value *SrcField = NewElts[i]; 1067 const Type *FieldTy = 1068 cast<PointerType>(SrcField->getType())->getElementType(); 1069 uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy); 1070 1071 // Ignore zero sized fields like {}, they obviously contain no data. 1072 if (FieldSizeBits == 0) continue; 1073 1074 const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(), 1075 FieldSizeBits); 1076 if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() && 1077 !isa<VectorType>(FieldTy)) 1078 SrcField = new BitCastInst(SrcField, 1079 PointerType::getUnqual(FieldIntTy), 1080 "", LI); 1081 SrcField = new LoadInst(SrcField, "sroa.load.elt", LI); 1082 1083 // If SrcField is a fp or vector of the right size but that isn't an 1084 // integer type, bitcast to an integer so we can shift it. 1085 if (SrcField->getType() != FieldIntTy) 1086 SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI); 1087 1088 // Zero extend the field to be the same size as the final alloca so that 1089 // we can shift and insert it. 1090 if (SrcField->getType() != ResultVal->getType()) 1091 SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI); 1092 1093 // Determine the number of bits to shift SrcField. 1094 uint64_t Shift; 1095 if (Layout) // Struct case. 1096 Shift = Layout->getElementOffsetInBits(i); 1097 else // Array case. 1098 Shift = i*ArrayEltBitOffset; 1099 1100 if (TD->isBigEndian()) 1101 Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth(); 1102 1103 if (Shift) { 1104 Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift); 1105 SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI); 1106 } 1107 1108 ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI); 1109 } 1110 1111 // Handle tail padding by truncating the result 1112 if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits) 1113 ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI); 1114 1115 LI->replaceAllUsesWith(ResultVal); 1116 LI->eraseFromParent(); 1117} 1118 1119 1120/// HasPadding - Return true if the specified type has any structure or 1121/// alignment padding, false otherwise. 1122static bool HasPadding(const Type *Ty, const TargetData &TD) { 1123 if (const StructType *STy = dyn_cast<StructType>(Ty)) { 1124 const StructLayout *SL = TD.getStructLayout(STy); 1125 unsigned PrevFieldBitOffset = 0; 1126 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1127 unsigned FieldBitOffset = SL->getElementOffsetInBits(i); 1128 1129 // Padding in sub-elements? 1130 if (HasPadding(STy->getElementType(i), TD)) 1131 return true; 1132 1133 // Check to see if there is any padding between this element and the 1134 // previous one. 1135 if (i) { 1136 unsigned PrevFieldEnd = 1137 PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1)); 1138 if (PrevFieldEnd < FieldBitOffset) 1139 return true; 1140 } 1141 1142 PrevFieldBitOffset = FieldBitOffset; 1143 } 1144 1145 // Check for tail padding. 1146 if (unsigned EltCount = STy->getNumElements()) { 1147 unsigned PrevFieldEnd = PrevFieldBitOffset + 1148 TD.getTypeSizeInBits(STy->getElementType(EltCount-1)); 1149 if (PrevFieldEnd < SL->getSizeInBits()) 1150 return true; 1151 } 1152 1153 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 1154 return HasPadding(ATy->getElementType(), TD); 1155 } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) { 1156 return HasPadding(VTy->getElementType(), TD); 1157 } 1158 return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty); 1159} 1160 1161/// isSafeStructAllocaToScalarRepl - Check to see if the specified allocation of 1162/// an aggregate can be broken down into elements. Return 0 if not, 3 if safe, 1163/// or 1 if safe after canonicalization has been performed. 1164/// 1165int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) { 1166 // Loop over the use list of the alloca. We can only transform it if all of 1167 // the users are safe to transform. 1168 AllocaInfo Info; 1169 1170 for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); 1171 I != E; ++I) { 1172 isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info); 1173 if (Info.isUnsafe) { 1174 DEBUG(errs() << "Cannot transform: " << *AI << "\n due to user: " 1175 << **I << '\n'); 1176 return 0; 1177 } 1178 } 1179 1180 // Okay, we know all the users are promotable. If the aggregate is a memcpy 1181 // source and destination, we have to be careful. In particular, the memcpy 1182 // could be moving around elements that live in structure padding of the LLVM 1183 // types, but may actually be used. In these cases, we refuse to promote the 1184 // struct. 1185 if (Info.isMemCpySrc && Info.isMemCpyDst && 1186 HasPadding(AI->getType()->getElementType(), *TD)) 1187 return 0; 1188 1189 // If we require cleanup, return 1, otherwise return 3. 1190 return Info.needsCleanup ? 1 : 3; 1191} 1192 1193/// CleanupGEP - GEP is used by an Alloca, which can be prompted after the GEP 1194/// is canonicalized here. 1195void SROA::CleanupGEP(GetElementPtrInst *GEPI) { 1196 gep_type_iterator I = gep_type_begin(GEPI); 1197 ++I; 1198 1199 const ArrayType *AT = dyn_cast<ArrayType>(*I); 1200 if (!AT) 1201 return; 1202 1203 uint64_t NumElements = AT->getNumElements(); 1204 1205 if (isa<ConstantInt>(I.getOperand())) 1206 return; 1207 1208 if (NumElements == 1) { 1209 GEPI->setOperand(2, 1210 Constant::getNullValue(Type::getInt32Ty(GEPI->getContext()))); 1211 return; 1212 } 1213 1214 assert(NumElements == 2 && "Unhandled case!"); 1215 // All users of the GEP must be loads. At each use of the GEP, insert 1216 // two loads of the appropriate indexed GEP and select between them. 1217 Value *IsOne = new ICmpInst(GEPI, ICmpInst::ICMP_NE, I.getOperand(), 1218 Constant::getNullValue(I.getOperand()->getType()), 1219 "isone"); 1220 // Insert the new GEP instructions, which are properly indexed. 1221 SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end()); 1222 Indices[1] = Constant::getNullValue(Type::getInt32Ty(GEPI->getContext())); 1223 Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0), 1224 Indices.begin(), 1225 Indices.end(), 1226 GEPI->getName()+".0", GEPI); 1227 Indices[1] = ConstantInt::get(Type::getInt32Ty(GEPI->getContext()), 1); 1228 Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0), 1229 Indices.begin(), 1230 Indices.end(), 1231 GEPI->getName()+".1", GEPI); 1232 // Replace all loads of the variable index GEP with loads from both 1233 // indexes and a select. 1234 while (!GEPI->use_empty()) { 1235 LoadInst *LI = cast<LoadInst>(GEPI->use_back()); 1236 Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI); 1237 Value *One = new LoadInst(OneIdx , LI->getName()+".1", LI); 1238 Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI); 1239 LI->replaceAllUsesWith(R); 1240 LI->eraseFromParent(); 1241 } 1242 GEPI->eraseFromParent(); 1243} 1244 1245 1246/// CleanupAllocaUsers - If SROA reported that it can promote the specified 1247/// allocation, but only if cleaned up, perform the cleanups required. 1248void SROA::CleanupAllocaUsers(AllocationInst *AI) { 1249 // At this point, we know that the end result will be SROA'd and promoted, so 1250 // we can insert ugly code if required so long as sroa+mem2reg will clean it 1251 // up. 1252 for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); 1253 UI != E; ) { 1254 User *U = *UI++; 1255 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) 1256 CleanupGEP(GEPI); 1257 else { 1258 Instruction *I = cast<Instruction>(U); 1259 SmallVector<DbgInfoIntrinsic *, 2> DbgInUses; 1260 if (!isa<StoreInst>(I) && OnlyUsedByDbgInfoIntrinsics(I, &DbgInUses)) { 1261 // Safe to remove debug info uses. 1262 while (!DbgInUses.empty()) { 1263 DbgInfoIntrinsic *DI = DbgInUses.back(); DbgInUses.pop_back(); 1264 DI->eraseFromParent(); 1265 } 1266 I->eraseFromParent(); 1267 } 1268 } 1269 } 1270} 1271 1272/// MergeInType - Add the 'In' type to the accumulated type (Accum) so far at 1273/// the offset specified by Offset (which is specified in bytes). 1274/// 1275/// There are two cases we handle here: 1276/// 1) A union of vector types of the same size and potentially its elements. 1277/// Here we turn element accesses into insert/extract element operations. 1278/// This promotes a <4 x float> with a store of float to the third element 1279/// into a <4 x float> that uses insert element. 1280/// 2) A fully general blob of memory, which we turn into some (potentially 1281/// large) integer type with extract and insert operations where the loads 1282/// and stores would mutate the memory. 1283static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy, 1284 unsigned AllocaSize, const TargetData &TD, 1285 LLVMContext &Context) { 1286 // If this could be contributing to a vector, analyze it. 1287 if (VecTy != Type::getVoidTy(Context)) { // either null or a vector type. 1288 1289 // If the In type is a vector that is the same size as the alloca, see if it 1290 // matches the existing VecTy. 1291 if (const VectorType *VInTy = dyn_cast<VectorType>(In)) { 1292 if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) { 1293 // If we're storing/loading a vector of the right size, allow it as a 1294 // vector. If this the first vector we see, remember the type so that 1295 // we know the element size. 1296 if (VecTy == 0) 1297 VecTy = VInTy; 1298 return; 1299 } 1300 } else if (In == Type::getFloatTy(Context) || 1301 In == Type::getDoubleTy(Context) || 1302 (isa<IntegerType>(In) && In->getPrimitiveSizeInBits() >= 8 && 1303 isPowerOf2_32(In->getPrimitiveSizeInBits()))) { 1304 // If we're accessing something that could be an element of a vector, see 1305 // if the implied vector agrees with what we already have and if Offset is 1306 // compatible with it. 1307 unsigned EltSize = In->getPrimitiveSizeInBits()/8; 1308 if (Offset % EltSize == 0 && 1309 AllocaSize % EltSize == 0 && 1310 (VecTy == 0 || 1311 cast<VectorType>(VecTy)->getElementType() 1312 ->getPrimitiveSizeInBits()/8 == EltSize)) { 1313 if (VecTy == 0) 1314 VecTy = VectorType::get(In, AllocaSize/EltSize); 1315 return; 1316 } 1317 } 1318 } 1319 1320 // Otherwise, we have a case that we can't handle with an optimized vector 1321 // form. We can still turn this into a large integer. 1322 VecTy = Type::getVoidTy(Context); 1323} 1324 1325/// CanConvertToScalar - V is a pointer. If we can convert the pointee and all 1326/// its accesses to use a to single vector type, return true, and set VecTy to 1327/// the new type. If we could convert the alloca into a single promotable 1328/// integer, return true but set VecTy to VoidTy. Further, if the use is not a 1329/// completely trivial use that mem2reg could promote, set IsNotTrivial. Offset 1330/// is the current offset from the base of the alloca being analyzed. 1331/// 1332/// If we see at least one access to the value that is as a vector type, set the 1333/// SawVec flag. 1334/// 1335bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy, 1336 bool &SawVec, uint64_t Offset, 1337 unsigned AllocaSize) { 1338 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) { 1339 Instruction *User = cast<Instruction>(*UI); 1340 1341 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 1342 // Don't break volatile loads. 1343 if (LI->isVolatile()) 1344 return false; 1345 MergeInType(LI->getType(), Offset, VecTy, 1346 AllocaSize, *TD, V->getContext()); 1347 SawVec |= isa<VectorType>(LI->getType()); 1348 continue; 1349 } 1350 1351 if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 1352 // Storing the pointer, not into the value? 1353 if (SI->getOperand(0) == V || SI->isVolatile()) return 0; 1354 MergeInType(SI->getOperand(0)->getType(), Offset, 1355 VecTy, AllocaSize, *TD, V->getContext()); 1356 SawVec |= isa<VectorType>(SI->getOperand(0)->getType()); 1357 continue; 1358 } 1359 1360 if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) { 1361 if (!CanConvertToScalar(BCI, IsNotTrivial, VecTy, SawVec, Offset, 1362 AllocaSize)) 1363 return false; 1364 IsNotTrivial = true; 1365 continue; 1366 } 1367 1368 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { 1369 // If this is a GEP with a variable indices, we can't handle it. 1370 if (!GEP->hasAllConstantIndices()) 1371 return false; 1372 1373 // Compute the offset that this GEP adds to the pointer. 1374 SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end()); 1375 uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(), 1376 &Indices[0], Indices.size()); 1377 // See if all uses can be converted. 1378 if (!CanConvertToScalar(GEP, IsNotTrivial, VecTy, SawVec,Offset+GEPOffset, 1379 AllocaSize)) 1380 return false; 1381 IsNotTrivial = true; 1382 continue; 1383 } 1384 1385 // If this is a constant sized memset of a constant value (e.g. 0) we can 1386 // handle it. 1387 if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) { 1388 // Store of constant value and constant size. 1389 if (isa<ConstantInt>(MSI->getValue()) && 1390 isa<ConstantInt>(MSI->getLength())) { 1391 IsNotTrivial = true; 1392 continue; 1393 } 1394 } 1395 1396 // If this is a memcpy or memmove into or out of the whole allocation, we 1397 // can handle it like a load or store of the scalar type. 1398 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) { 1399 if (ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength())) 1400 if (Len->getZExtValue() == AllocaSize && Offset == 0) { 1401 IsNotTrivial = true; 1402 continue; 1403 } 1404 } 1405 1406 // Ignore dbg intrinsic. 1407 if (isa<DbgInfoIntrinsic>(User)) 1408 continue; 1409 1410 // Otherwise, we cannot handle this! 1411 return false; 1412 } 1413 1414 return true; 1415} 1416 1417 1418/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca 1419/// directly. This happens when we are converting an "integer union" to a 1420/// single integer scalar, or when we are converting a "vector union" to a 1421/// vector with insert/extractelement instructions. 1422/// 1423/// Offset is an offset from the original alloca, in bits that need to be 1424/// shifted to the right. By the end of this, there should be no uses of Ptr. 1425void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) { 1426 while (!Ptr->use_empty()) { 1427 Instruction *User = cast<Instruction>(Ptr->use_back()); 1428 1429 if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) { 1430 ConvertUsesToScalar(CI, NewAI, Offset); 1431 CI->eraseFromParent(); 1432 continue; 1433 } 1434 1435 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { 1436 // Compute the offset that this GEP adds to the pointer. 1437 SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end()); 1438 uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(), 1439 &Indices[0], Indices.size()); 1440 ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8); 1441 GEP->eraseFromParent(); 1442 continue; 1443 } 1444 1445 IRBuilder<> Builder(User->getParent(), User); 1446 1447 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 1448 // The load is a bit extract from NewAI shifted right by Offset bits. 1449 Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp"); 1450 Value *NewLoadVal 1451 = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder); 1452 LI->replaceAllUsesWith(NewLoadVal); 1453 LI->eraseFromParent(); 1454 continue; 1455 } 1456 1457 if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 1458 assert(SI->getOperand(0) != Ptr && "Consistency error!"); 1459 // FIXME: Remove once builder has Twine API. 1460 Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").str().c_str()); 1461 Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset, 1462 Builder); 1463 Builder.CreateStore(New, NewAI); 1464 SI->eraseFromParent(); 1465 continue; 1466 } 1467 1468 // If this is a constant sized memset of a constant value (e.g. 0) we can 1469 // transform it into a store of the expanded constant value. 1470 if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) { 1471 assert(MSI->getRawDest() == Ptr && "Consistency error!"); 1472 unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue(); 1473 if (NumBytes != 0) { 1474 unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue(); 1475 1476 // Compute the value replicated the right number of times. 1477 APInt APVal(NumBytes*8, Val); 1478 1479 // Splat the value if non-zero. 1480 if (Val) 1481 for (unsigned i = 1; i != NumBytes; ++i) 1482 APVal |= APVal << 8; 1483 1484 // FIXME: Remove once builder has Twine API. 1485 Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").str().c_str()); 1486 Value *New = ConvertScalar_InsertValue( 1487 ConstantInt::get(User->getContext(), APVal), 1488 Old, Offset, Builder); 1489 Builder.CreateStore(New, NewAI); 1490 } 1491 MSI->eraseFromParent(); 1492 continue; 1493 } 1494 1495 // If this is a memcpy or memmove into or out of the whole allocation, we 1496 // can handle it like a load or store of the scalar type. 1497 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) { 1498 assert(Offset == 0 && "must be store to start of alloca"); 1499 1500 // If the source and destination are both to the same alloca, then this is 1501 // a noop copy-to-self, just delete it. Otherwise, emit a load and store 1502 // as appropriate. 1503 AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject()); 1504 1505 if (MTI->getSource()->getUnderlyingObject() != OrigAI) { 1506 // Dest must be OrigAI, change this to be a load from the original 1507 // pointer (bitcasted), then a store to our new alloca. 1508 assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?"); 1509 Value *SrcPtr = MTI->getSource(); 1510 SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType()); 1511 1512 LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval"); 1513 SrcVal->setAlignment(MTI->getAlignment()); 1514 Builder.CreateStore(SrcVal, NewAI); 1515 } else if (MTI->getDest()->getUnderlyingObject() != OrigAI) { 1516 // Src must be OrigAI, change this to be a load from NewAI then a store 1517 // through the original dest pointer (bitcasted). 1518 assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?"); 1519 LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval"); 1520 1521 Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), NewAI->getType()); 1522 StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr); 1523 NewStore->setAlignment(MTI->getAlignment()); 1524 } else { 1525 // Noop transfer. Src == Dst 1526 } 1527 1528 1529 MTI->eraseFromParent(); 1530 continue; 1531 } 1532 1533 // If user is a dbg info intrinsic then it is safe to remove it. 1534 if (isa<DbgInfoIntrinsic>(User)) { 1535 User->eraseFromParent(); 1536 continue; 1537 } 1538 1539 llvm_unreachable("Unsupported operation!"); 1540 } 1541} 1542 1543/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer 1544/// or vector value FromVal, extracting the bits from the offset specified by 1545/// Offset. This returns the value, which is of type ToType. 1546/// 1547/// This happens when we are converting an "integer union" to a single 1548/// integer scalar, or when we are converting a "vector union" to a vector with 1549/// insert/extractelement instructions. 1550/// 1551/// Offset is an offset from the original alloca, in bits that need to be 1552/// shifted to the right. 1553Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType, 1554 uint64_t Offset, IRBuilder<> &Builder) { 1555 // If the load is of the whole new alloca, no conversion is needed. 1556 if (FromVal->getType() == ToType && Offset == 0) 1557 return FromVal; 1558 1559 // If the result alloca is a vector type, this is either an element 1560 // access or a bitcast to another vector type of the same size. 1561 if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) { 1562 if (isa<VectorType>(ToType)) 1563 return Builder.CreateBitCast(FromVal, ToType, "tmp"); 1564 1565 // Otherwise it must be an element access. 1566 unsigned Elt = 0; 1567 if (Offset) { 1568 unsigned EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType()); 1569 Elt = Offset/EltSize; 1570 assert(EltSize*Elt == Offset && "Invalid modulus in validity checking"); 1571 } 1572 // Return the element extracted out of it. 1573 Value *V = Builder.CreateExtractElement(FromVal, ConstantInt::get( 1574 Type::getInt32Ty(FromVal->getContext()), Elt), "tmp"); 1575 if (V->getType() != ToType) 1576 V = Builder.CreateBitCast(V, ToType, "tmp"); 1577 return V; 1578 } 1579 1580 // If ToType is a first class aggregate, extract out each of the pieces and 1581 // use insertvalue's to form the FCA. 1582 if (const StructType *ST = dyn_cast<StructType>(ToType)) { 1583 const StructLayout &Layout = *TD->getStructLayout(ST); 1584 Value *Res = UndefValue::get(ST); 1585 for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) { 1586 Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i), 1587 Offset+Layout.getElementOffsetInBits(i), 1588 Builder); 1589 Res = Builder.CreateInsertValue(Res, Elt, i, "tmp"); 1590 } 1591 return Res; 1592 } 1593 1594 if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) { 1595 uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType()); 1596 Value *Res = UndefValue::get(AT); 1597 for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { 1598 Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(), 1599 Offset+i*EltSize, Builder); 1600 Res = Builder.CreateInsertValue(Res, Elt, i, "tmp"); 1601 } 1602 return Res; 1603 } 1604 1605 // Otherwise, this must be a union that was converted to an integer value. 1606 const IntegerType *NTy = cast<IntegerType>(FromVal->getType()); 1607 1608 // If this is a big-endian system and the load is narrower than the 1609 // full alloca type, we need to do a shift to get the right bits. 1610 int ShAmt = 0; 1611 if (TD->isBigEndian()) { 1612 // On big-endian machines, the lowest bit is stored at the bit offset 1613 // from the pointer given by getTypeStoreSizeInBits. This matters for 1614 // integers with a bitwidth that is not a multiple of 8. 1615 ShAmt = TD->getTypeStoreSizeInBits(NTy) - 1616 TD->getTypeStoreSizeInBits(ToType) - Offset; 1617 } else { 1618 ShAmt = Offset; 1619 } 1620 1621 // Note: we support negative bitwidths (with shl) which are not defined. 1622 // We do this to support (f.e.) loads off the end of a structure where 1623 // only some bits are used. 1624 if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth()) 1625 FromVal = Builder.CreateLShr(FromVal, 1626 ConstantInt::get(FromVal->getType(), 1627 ShAmt), "tmp"); 1628 else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth()) 1629 FromVal = Builder.CreateShl(FromVal, 1630 ConstantInt::get(FromVal->getType(), 1631 -ShAmt), "tmp"); 1632 1633 // Finally, unconditionally truncate the integer to the right width. 1634 unsigned LIBitWidth = TD->getTypeSizeInBits(ToType); 1635 if (LIBitWidth < NTy->getBitWidth()) 1636 FromVal = 1637 Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(), 1638 LIBitWidth), "tmp"); 1639 else if (LIBitWidth > NTy->getBitWidth()) 1640 FromVal = 1641 Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(), 1642 LIBitWidth), "tmp"); 1643 1644 // If the result is an integer, this is a trunc or bitcast. 1645 if (isa<IntegerType>(ToType)) { 1646 // Should be done. 1647 } else if (ToType->isFloatingPoint() || isa<VectorType>(ToType)) { 1648 // Just do a bitcast, we know the sizes match up. 1649 FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp"); 1650 } else { 1651 // Otherwise must be a pointer. 1652 FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp"); 1653 } 1654 assert(FromVal->getType() == ToType && "Didn't convert right?"); 1655 return FromVal; 1656} 1657 1658 1659/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer 1660/// or vector value "Old" at the offset specified by Offset. 1661/// 1662/// This happens when we are converting an "integer union" to a 1663/// single integer scalar, or when we are converting a "vector union" to a 1664/// vector with insert/extractelement instructions. 1665/// 1666/// Offset is an offset from the original alloca, in bits that need to be 1667/// shifted to the right. 1668Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old, 1669 uint64_t Offset, IRBuilder<> &Builder) { 1670 1671 // Convert the stored type to the actual type, shift it left to insert 1672 // then 'or' into place. 1673 const Type *AllocaType = Old->getType(); 1674 LLVMContext &Context = Old->getContext(); 1675 1676 if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) { 1677 uint64_t VecSize = TD->getTypeAllocSizeInBits(VTy); 1678 uint64_t ValSize = TD->getTypeAllocSizeInBits(SV->getType()); 1679 1680 // Changing the whole vector with memset or with an access of a different 1681 // vector type? 1682 if (ValSize == VecSize) 1683 return Builder.CreateBitCast(SV, AllocaType, "tmp"); 1684 1685 uint64_t EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType()); 1686 1687 // Must be an element insertion. 1688 unsigned Elt = Offset/EltSize; 1689 1690 if (SV->getType() != VTy->getElementType()) 1691 SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp"); 1692 1693 SV = Builder.CreateInsertElement(Old, SV, 1694 ConstantInt::get(Type::getInt32Ty(SV->getContext()), Elt), 1695 "tmp"); 1696 return SV; 1697 } 1698 1699 // If SV is a first-class aggregate value, insert each value recursively. 1700 if (const StructType *ST = dyn_cast<StructType>(SV->getType())) { 1701 const StructLayout &Layout = *TD->getStructLayout(ST); 1702 for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) { 1703 Value *Elt = Builder.CreateExtractValue(SV, i, "tmp"); 1704 Old = ConvertScalar_InsertValue(Elt, Old, 1705 Offset+Layout.getElementOffsetInBits(i), 1706 Builder); 1707 } 1708 return Old; 1709 } 1710 1711 if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) { 1712 uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType()); 1713 for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { 1714 Value *Elt = Builder.CreateExtractValue(SV, i, "tmp"); 1715 Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder); 1716 } 1717 return Old; 1718 } 1719 1720 // If SV is a float, convert it to the appropriate integer type. 1721 // If it is a pointer, do the same. 1722 unsigned SrcWidth = TD->getTypeSizeInBits(SV->getType()); 1723 unsigned DestWidth = TD->getTypeSizeInBits(AllocaType); 1724 unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType()); 1725 unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType); 1726 if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType())) 1727 SV = Builder.CreateBitCast(SV, 1728 IntegerType::get(SV->getContext(),SrcWidth), "tmp"); 1729 else if (isa<PointerType>(SV->getType())) 1730 SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(SV->getContext()), "tmp"); 1731 1732 // Zero extend or truncate the value if needed. 1733 if (SV->getType() != AllocaType) { 1734 if (SV->getType()->getPrimitiveSizeInBits() < 1735 AllocaType->getPrimitiveSizeInBits()) 1736 SV = Builder.CreateZExt(SV, AllocaType, "tmp"); 1737 else { 1738 // Truncation may be needed if storing more than the alloca can hold 1739 // (undefined behavior). 1740 SV = Builder.CreateTrunc(SV, AllocaType, "tmp"); 1741 SrcWidth = DestWidth; 1742 SrcStoreWidth = DestStoreWidth; 1743 } 1744 } 1745 1746 // If this is a big-endian system and the store is narrower than the 1747 // full alloca type, we need to do a shift to get the right bits. 1748 int ShAmt = 0; 1749 if (TD->isBigEndian()) { 1750 // On big-endian machines, the lowest bit is stored at the bit offset 1751 // from the pointer given by getTypeStoreSizeInBits. This matters for 1752 // integers with a bitwidth that is not a multiple of 8. 1753 ShAmt = DestStoreWidth - SrcStoreWidth - Offset; 1754 } else { 1755 ShAmt = Offset; 1756 } 1757 1758 // Note: we support negative bitwidths (with shr) which are not defined. 1759 // We do this to support (f.e.) stores off the end of a structure where 1760 // only some bits in the structure are set. 1761 APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth)); 1762 if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) { 1763 SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(), 1764 ShAmt), "tmp"); 1765 Mask <<= ShAmt; 1766 } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) { 1767 SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(), 1768 -ShAmt), "tmp"); 1769 Mask = Mask.lshr(-ShAmt); 1770 } 1771 1772 // Mask out the bits we are about to insert from the old value, and or 1773 // in the new bits. 1774 if (SrcWidth != DestWidth) { 1775 assert(DestWidth > SrcWidth); 1776 Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask"); 1777 SV = Builder.CreateOr(Old, SV, "ins"); 1778 } 1779 return SV; 1780} 1781 1782 1783 1784/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to 1785/// some part of a constant global variable. This intentionally only accepts 1786/// constant expressions because we don't can't rewrite arbitrary instructions. 1787static bool PointsToConstantGlobal(Value *V) { 1788 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) 1789 return GV->isConstant(); 1790 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) 1791 if (CE->getOpcode() == Instruction::BitCast || 1792 CE->getOpcode() == Instruction::GetElementPtr) 1793 return PointsToConstantGlobal(CE->getOperand(0)); 1794 return false; 1795} 1796 1797/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived) 1798/// pointer to an alloca. Ignore any reads of the pointer, return false if we 1799/// see any stores or other unknown uses. If we see pointer arithmetic, keep 1800/// track of whether it moves the pointer (with isOffset) but otherwise traverse 1801/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to 1802/// the alloca, and if the source pointer is a pointer to a constant global, we 1803/// can optimize this. 1804static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy, 1805 bool isOffset) { 1806 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) { 1807 if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) 1808 // Ignore non-volatile loads, they are always ok. 1809 if (!LI->isVolatile()) 1810 continue; 1811 1812 if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) { 1813 // If uses of the bitcast are ok, we are ok. 1814 if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset)) 1815 return false; 1816 continue; 1817 } 1818 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) { 1819 // If the GEP has all zero indices, it doesn't offset the pointer. If it 1820 // doesn't, it does. 1821 if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy, 1822 isOffset || !GEP->hasAllZeroIndices())) 1823 return false; 1824 continue; 1825 } 1826 1827 // If this is isn't our memcpy/memmove, reject it as something we can't 1828 // handle. 1829 if (!isa<MemTransferInst>(*UI)) 1830 return false; 1831 1832 // If we already have seen a copy, reject the second one. 1833 if (TheCopy) return false; 1834 1835 // If the pointer has been offset from the start of the alloca, we can't 1836 // safely handle this. 1837 if (isOffset) return false; 1838 1839 // If the memintrinsic isn't using the alloca as the dest, reject it. 1840 if (UI.getOperandNo() != 1) return false; 1841 1842 MemIntrinsic *MI = cast<MemIntrinsic>(*UI); 1843 1844 // If the source of the memcpy/move is not a constant global, reject it. 1845 if (!PointsToConstantGlobal(MI->getOperand(2))) 1846 return false; 1847 1848 // Otherwise, the transform is safe. Remember the copy instruction. 1849 TheCopy = MI; 1850 } 1851 return true; 1852} 1853 1854/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only 1855/// modified by a copy from a constant global. If we can prove this, we can 1856/// replace any uses of the alloca with uses of the global directly. 1857Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) { 1858 Instruction *TheCopy = 0; 1859 if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false)) 1860 return TheCopy; 1861 return 0; 1862} 1863