ScalarReplAggregates.cpp revision 7139406707eb3869183fd6a3329fe4a77d309692
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well-known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because the
// two often interact, especially for C++ programs.  As such, iterating between
// SRoA and Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//
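
// Editor's illustrative example (not part of the original comment): given
//
//   %T = alloca { i32, float }
//   %A = getelementptr { i32, float }* %T, i32 0, i32 0
//   store i32 1, i32* %A
//
// SROA replaces %T with an i32 alloca and a float alloca, and mem2reg can
// then promote each piece to an SSA register.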

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

namespace {
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    static char ID; // Pass identification, replacement for typeid
    SROA() : FunctionPass((intptr_t)&ID) {}

    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - We need dominance information and target data, and
    // we do not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<ETForest>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    int isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI);
    int isSafeUseOfAllocation(Instruction *User, AllocationInst *AI);
    bool isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI);
    bool isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI);
    int isSafeAllocaToScalarRepl(AllocationInst *AI);
    void DoScalarReplacement(AllocationInst *AI,
                             std::vector<AllocationInst*> &WorkList);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
    void ConvertToScalar(AllocationInst *AI, const Type *Ty);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
  };

  char SROA::ID = 0;
  RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
}

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass() { return new SROA(); }


bool SROA::runOnFunction(Function &F) {
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  ETForest &ET = getAnalysis<ETForest>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions
    // in the entry node.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))    // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, ET, DF);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}
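
// Editor's illustrative note: a promotable alloca is one accessed only by
// direct loads and stores, e.g.
//
//   %X = alloca i32
//   store i32 4, i32* %X
//   %V = load i32* %X
//
// PromoteMemToReg rewrites the load to use the stored value directly and
// deletes %X.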

// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function,
// removing them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any allocas and mallocs to the
  // worklist.
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  const TargetData &TD = getAnalysis<TargetData>();

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // Check to see if we can turn this aggregate value (potentially with
    // casts) into a simple scalar value that can be mem2reg'd into a register
    // value.
    bool IsNotTrivial = false;
    if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial))
      if (IsNotTrivial && ActualType != Type::VoidTy) {
        ConvertToScalar(AI, ActualType);
        Changed = true;
        continue;
      }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    if (!AI->isArrayAllocation() &&
        (isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType())) &&
        AI->getAllocatedType()->isSized() &&
        TD.getTypeSize(AI->getAllocatedType()) < 128) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: assert(0 && "Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CanonicalizeAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global.  If this is the case, we can change all users
    // to use the constant global instead.  This is commonly produced by the
    // CFE by constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }"
    // if 'A' is only subsequently read.
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DOUT << "Found alloca equal to global: " << *AI;
      DOUT << "  memcpy = " << *TheCopy;
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this.
  }

  return Changed;
}
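
// Editor's illustrative sketch of the constant-global case above: for
//
//   @C = internal constant [4 x i32] [ i32 1, i32 2, i32 3, i32 4 ]
//   %A = alloca [4 x i32]
//   call void @llvm.memcpy.i32( i8* %A.cast, i8* %C.cast, i32 16, i32 4 )
//
// where %A is otherwise only read (%A.cast/%C.cast are hypothetical bitcast
// names), the memcpy and the alloca are deleted and all readers use @C
// directly.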

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate; do SROA now.
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DOUT << "Found inst to SROA: " << *AI;
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
  //
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
      (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelementptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      //
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = new GetElementPtrInst(AllocaToUse, &NewArgs[0],
                                       NewArgs.size(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0) {
      bool IsStartOfAggregateGEP = true;
      for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) {
        if (!isa<ConstantInt>(GEPI->getOperand(i))) {
          IsStartOfAggregateGEP = false;
          break;
        }
        if (!cast<ConstantInt>(GEPI->getOperand(i))->isZero()) {
          IsStartOfAggregateGEP = false;
          break;
        }
      }

      if (IsStartOfAggregateGEP)
        RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);
    }

    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  NumReplaced++;
}
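
// Editor's illustrative example of DoScalarReplacement: for
//
//   %A   = alloca { i32, i32 }
//   %A.f = getelementptr { i32, i32 }* %A, i32 0, i32 1
//
// the alloca is split into %A.0 and %A.1 (both "alloca i32"), and all users
// of %A.f are redirected to %A.1.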

/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
///
int SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return 0;
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1) {
        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
          return 0;  // Using pointer arithmetic to navigate the array.

        if (AreAllZeroIndices) {
          for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) {
            if (!isa<ConstantInt>(GEP->getOperand(i)) ||
                !cast<ConstantInt>(GEP->getOperand(i))->isZero()) {
              AreAllZeroIndices = false;
              break;
            }
          }
        }
      }
      if (!isSafeElementUse(GEP, AreAllZeroIndices, AI)) return 0;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt &&
          isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI))
        break;
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt && isSafeMemIntrinsicOnAllocation(MI, AI))
          break;
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    default:
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    }
  }
  return 3;  // All users look ok :)
}

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}
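
// Editor's illustrative note: for %A = alloca [4 x i32], the use
// "getelementptr [4 x i32]* %A, i32 0, i32 2" is safe to scalar-replace,
// while "getelementptr [4 x i32]* %A, i32 1" (stepping past the allocation)
// or storing %A itself to memory is not.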

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
int SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI) {
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI) ? 3 : 0;
  if (!isa<GetElementPtrInst>(User)) return 0;

  GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType()))
    return 0;

  ++I;
  if (I == E) return 0;  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If this is a use of an array allocation, do a bit more checking for
  // sanity.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    uint64_t NumElements = AT->getNumElements();

    if (ConstantInt *Idx = dyn_cast<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices &= Idx->isZero();

      // Check to make sure that index falls within the array.  If not,
      // something funny is going on, so we won't do the optimization.
      //
      if (Idx->getZExtValue() >= NumElements)
        return 0;

      // We cannot scalar repl this level of the array unless any array
      // sub-indices are in-range constants.  In particular, consider:
      // A[0][i].  We cannot know that the user isn't doing invalid things
      // like allowing i to index an out-of-range subscript that accesses
      // A[1].
      //
      // Scalar replacing *just* the outer index of the array is probably not
      // going to be a win anyway, so just give up.
      for (++I; I != E && (isa<ArrayType>(*I) || isa<VectorType>(*I)); ++I) {
        uint64_t NumElements;
        if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*I))
          NumElements = SubArrayTy->getNumElements();
        else
          NumElements = cast<VectorType>(*I)->getNumElements();

        ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
        if (!IdxVal) return 0;
        if (IdxVal->getZExtValue() >= NumElements)
          return 0;
        IsAllZeroIndices &= IdxVal->isZero();
      }

    } else {
      IsAllZeroIndices = 0;

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements
      // in it, in which case we CAN promote it, but we have to canonicalize
      // this out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI))
        return 1;  // Canonicalization required!
      return 0;
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to
  // reject them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI);
}

/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory
/// intrinsic can be promoted by SROA.  At this point, we know that the
/// operand of the memintrinsic is a pointer to the beginning of the
/// allocation.
bool SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI) {
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return false;

  // If not the whole aggregate, give up.
  const TargetData &TD = getAnalysis<TargetData>();
  if (Length->getZExtValue() !=
      TD.getTypeSize(AI->getType()->getElementType()))
    return false;

  // We only know about memcpy/memset/memmove.
  if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
    return false;
  // Otherwise, we can transform it.
  return true;
}

/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
/// are safe to transform: they must be either further bitcasts whose users
/// are themselves safe, or mem intrinsics that operate on the whole
/// allocation.
bool SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      if (!isSafeUseOfBitCastedAllocation(BCU, AI))
        return false;
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      if (!isSafeMemIntrinsicOnAllocation(MI, AI))
        return false;
    } else {
      return false;
    }
  }
  return true;
}
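
// Editor's illustrative example of the rewrite below: a memset of a whole
// { i32, i32 } aggregate through a bitcast,
//
//   call void @llvm.memset.i32( i8* %A.cast, i8 0, i32 8, i32 4 )
//
// becomes one "store i32 0" per element alloca; memcpy/memmove are likewise
// split into per-element copies.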

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Constant *Zero = Constant::getNullValue(Type::Int32Ty);
  const TargetData &TD = getAnalysis<TargetData>();

  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(*UI)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      ++UI;
      BCU->eraseFromParent();
      continue;
    }

    // Otherwise, must be memcpy/memmove/memset of the entire aggregate.
    // Split into one per element.
    MemIntrinsic *MI = dyn_cast<MemIntrinsic>(*UI);

    // If it's not a mem intrinsic, it must be some other user of a gep of the
    // first pointer.  Just leave these alone.
    if (!MI) {
      ++UI;
      continue;
    }

    // If this is a memcpy/memmove, construct the other pointer as the
    // appropriate type.
    Value *OtherPtr = 0;
    if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
      if (BCInst == MCI->getRawDest())
        OtherPtr = MCI->getRawSource();
      else {
        assert(BCInst == MCI->getRawSource());
        OtherPtr = MCI->getRawDest();
      }
    } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (BCInst == MMI->getRawDest())
        OtherPtr = MMI->getRawSource();
      else {
        assert(BCInst == MMI->getRawSource());
        OtherPtr = MMI->getRawDest();
      }
    }

    // If there is another pointer, we want to convert it to the same pointer
    // type as AI has, so we can GEP through it.
    if (OtherPtr) {
      // It is likely that OtherPtr is a bitcast; if so, remove it.
      if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
        OtherPtr = BC->getOperand(0);
      if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
        if (BCE->getOpcode() == Instruction::BitCast)
          OtherPtr = BCE->getOperand(0);

      // If the pointer is not the right type, insert a bitcast to the right
      // type.
      if (OtherPtr->getType() != AI->getType())
        OtherPtr = new BitCastInst(OtherPtr, AI->getType(),
                                   OtherPtr->getName(), MI);
    }

    // Process each element of the aggregate.
    Value *TheFn = MI->getOperand(0);
    const Type *BytePtrTy = MI->getRawDest()->getType();
    bool SROADest = MI->getRawDest() == BCInst;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // If this is a memcpy/memmove, emit a GEP of the other element address.
      Value *OtherElt = 0;
      if (OtherPtr) {
        OtherElt = new GetElementPtrInst(OtherPtr, Zero,
                                         ConstantInt::get(Type::Int32Ty, i),
                                         OtherPtr->getNameStr()+"."+utostr(i),
                                         MI);
      }

      Value *EltPtr = NewElts[i];
      const Type *EltTy =
        cast<PointerType>(EltPtr->getType())->getElementType();

      // If we got down to a scalar, insert a load or store as appropriate.
      if (EltTy->isFirstClassType()) {
        if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
          Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp",
                                    MI);
          new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
          continue;
        } else {
          assert(isa<MemSetInst>(MI));

          // If the stored element is zero (common case), just store a null
          // constant.
          Constant *StoreVal;
          if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
            if (CI->isZero()) {
              StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
            } else {
              // If EltTy is a packed type, get the element type.
              const Type *ValTy = EltTy;
              if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
                ValTy = VTy->getElementType();

              // Construct an integer with the right value.
              unsigned EltSize = TD.getTypeSize(ValTy);
              APInt OneVal(EltSize*8, CI->getZExtValue());
              APInt TotalVal(OneVal);
              // Set each byte.
              for (unsigned i = 0; i != EltSize-1; ++i) {
                TotalVal = TotalVal.shl(8);
                TotalVal |= OneVal;
              }
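              // Editor's worked example (illustrative): for a 4-byte element
              // and a memset value of 1, the loop above builds 0x01010101,
              // replicating the memset byte into every byte of the element.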

              // Convert the integer value to the appropriate type.
              StoreVal = ConstantInt::get(TotalVal);
              if (isa<PointerType>(ValTy))
                StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
              else if (ValTy->isFloatingPoint())
                StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
              assert(StoreVal->getType() == ValTy && "Type mismatch!");

              // If the requested value was a vector constant, create it.
              if (EltTy != ValTy) {
                unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
                SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
                StoreVal = ConstantVector::get(&Elts[0], NumElts);
              }
            }
            new StoreInst(StoreVal, EltPtr, MI);
            continue;
          }
          // Otherwise, if we're storing a byte variable, use a memset call
          // for this element.
        }
      }

      // Cast the element pointer to BytePtrTy.
      if (EltPtr->getType() != BytePtrTy)
        EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

      // Cast the other pointer (if we have one) to BytePtrTy.
      if (OtherElt && OtherElt->getType() != BytePtrTy)
        OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                   MI);

      unsigned EltSize = TD.getTypeSize(EltTy);

      // Finally, insert the meminst for this element.
      if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
        Value *Ops[] = {
          SROADest ? EltPtr : OtherElt,  // Dest ptr
          SROADest ? OtherElt : EltPtr,  // Src ptr
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          Zero  // Align
        };
        new CallInst(TheFn, Ops, 4, "", MI);
      } else {
        assert(isa<MemSetInst>(MI));
        Value *Ops[] = {
          EltPtr, MI->getOperand(2),  // Dest, Value,
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          Zero  // Align
        };
        new CallInst(TheFn, Ops, 4, "", MI);
      }
    }

    // Finally, MI is now dead, as we've modified its actions to occur on all
    // of the elements of the aggregate.
    ++UI;
    MI->eraseFromParent();
  }
}


/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all
  // of the users are safe to transform.
  //
  int isSafe = 3;
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafe &= isSafeUseOfAllocation(cast<Instruction>(*I), AI);
    if (isSafe == 0) {
      DOUT << "Cannot transform: " << *AI << "  due to user: " << **I;
      return 0;
    }
  }
  // If we require cleanup, isSafe is now 1, otherwise it is 3.
  return isSafe;
}

/// CanonicalizeAllocaUsers - If SROA reported that it can promote the
/// specified allocation, but only if cleaned up, perform the cleanups
/// required.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted,
  // so we can insert ugly code if required so long as sroa+mem2reg will clean
  // it up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI++);
    if (!GEPI) continue;
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP,
          // insert two loads of the appropriate indexed GEP and select
          // between them.
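          // Editor's illustrative sketch: "%V = load (GEP %A, 0, %i)" on a
          // two-element array becomes
          //   %V0 = load (GEP %A, 0, 0)
          //   %V1 = load (GEP %A, 0, 1)
          //   %V  = select (%i != 0), %V1, %V0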
          Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(),
                             Constant::getNullValue(I.getOperand()->getType()),
                                      "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::Int32Ty);
          Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                 &Indices[0], Indices.size(),
                                                 GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
          Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                &Indices[0], Indices.size(),
                                                GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}

/// MergeInType - Add the 'In' type to the accumulated type so far.  If the
/// types are incompatible, return true, otherwise update Accum and return
/// false.
///
/// There are three cases we handle here:
///  1) An effectively-integer union, where the pieces are stored into as
///     smaller integers (common with byte swap and other idioms).
///  2) A union of vector types of the same size and potentially its elements.
///     Here we turn element accesses into insert/extract element operations.
///  3) A union of scalar types, such as int/float or int/pointer.  Here we
///     merge together into integers, allowing the xform to work with #1 as
///     well.
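///
/// Illustrative examples (editor's sketch): merging i8 into i32 keeps i32
/// (case 1); merging float into <4 x float> keeps <4 x float> (case 2);
/// merging float into i32 yields i32 (case 3).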
static bool MergeInType(const Type *In, const Type *&Accum,
                        const TargetData &TD) {
  // If this is our first type, just use it.
  const VectorType *PTy;
  if (Accum == Type::VoidTy || In == Accum) {
    Accum = In;
  } else if (In == Type::VoidTy) {
    // Noop.
  } else if (In->isInteger() && Accum->isInteger()) {   // integer union.
    // Otherwise pick whichever type is larger.
    if (cast<IntegerType>(In)->getBitWidth() >
        cast<IntegerType>(Accum)->getBitWidth())
      Accum = In;
  } else if (isa<PointerType>(In) && isa<PointerType>(Accum)) {
    // Pointer unions just stay as one of the pointers.
  } else if (isa<VectorType>(In) || isa<VectorType>(Accum)) {
    if ((PTy = dyn_cast<VectorType>(Accum)) &&
        PTy->getElementType() == In) {
      // Accum is a vector, and we are accessing an element: ok.
    } else if ((PTy = dyn_cast<VectorType>(In)) &&
               PTy->getElementType() == Accum) {
      // In is a vector, and Accum is an element: ok, remember In.
      Accum = In;
    } else if ((PTy = dyn_cast<VectorType>(In)) && isa<VectorType>(Accum) &&
               PTy->getBitWidth() == cast<VectorType>(Accum)->getBitWidth()) {
      // Two vectors of the same size: keep Accum.
    } else {
      // Cannot insert a short into a <4 x int> or handle
      // <2 x int> -> <4 x int>.
      return true;
    }
  } else {
    // Pointer/FP/Integer unions merge together as integers.
    switch (Accum->getTypeID()) {
    case Type::PointerTyID: Accum = TD.getIntPtrType(); break;
    case Type::FloatTyID:   Accum = Type::Int32Ty; break;
    case Type::DoubleTyID:  Accum = Type::Int64Ty; break;
    default:
      assert(Accum->isInteger() && "Unknown FP type!");
      break;
    }

    switch (In->getTypeID()) {
    case Type::PointerTyID: In = TD.getIntPtrType(); break;
    case Type::FloatTyID:   In = Type::Int32Ty; break;
    case Type::DoubleTyID:  In = Type::Int64Ty; break;
    default:
      assert(In->isInteger() && "Unknown FP type!");
      break;
    }
    return MergeInType(In, Accum, TD);
  }
  return false;
}

/// getUIntAtLeastAsBitAs - Return an unsigned integer type that is at least
/// as big as the specified number of bits.  If there is no suitable type
/// (more than 64 bits are needed), this returns null.
const Type *getUIntAtLeastAsBitAs(unsigned NumBits) {
  if (NumBits > 64) return 0;
  if (NumBits > 32) return Type::Int64Ty;
  if (NumBits > 16) return Type::Int32Ty;
  if (NumBits > 8) return Type::Int16Ty;
  return Type::Int8Ty;
}
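
// Editor's illustrative note: getUIntAtLeastAsBitAs returns i8 for 1-8 bits,
// i16 for 9-16, i32 for 17-32, i64 for 33-64, and null above 64.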

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee to a
/// single scalar (integer or vector) type, return that type.  Further, if the
/// use is not a completely trivial use that mem2reg could promote, set
/// IsNotTrivial.  If there are no uses of this pointer, return Type::VoidTy
/// to differentiate from failure.
///
const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
  const Type *UsedType = Type::VoidTy; // No uses, no forced type.
  const TargetData &TD = getAnalysis<TargetData>();
  const PointerType *PTy = cast<PointerType>(V->getType());

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (MergeInType(LI->getType(), UsedType, TD))
        return 0;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V) return 0;

      // NOTE: We could handle storing of FP imms into integers here!

      if (MergeInType(SI->getOperand(0)->getType(), UsedType, TD))
        return 0;
    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      IsNotTrivial = true;
      const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial);
      if (!SubTy || MergeInType(SubTy, UsedType, TD)) return 0;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Check to see if this is stepping over an element: GEP Ptr, int C
      if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned ElSize = TD.getTypeSize(PTy->getElementType());
        unsigned BitOffset = Idx*ElSize*8;
        if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;

        IsNotTrivial = true;
        const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubElt == 0) return 0;
        if (SubElt != Type::VoidTy && SubElt->isInteger()) {
          const Type *NewTy =
            getUIntAtLeastAsBitAs(TD.getTypeSize(SubElt)*8+BitOffset);
          if (NewTy == 0 || MergeInType(NewTy, UsedType, TD)) return 0;
          continue;
        }
      } else if (GEP->getNumOperands() == 3 &&
                 isa<ConstantInt>(GEP->getOperand(1)) &&
                 isa<ConstantInt>(GEP->getOperand(2)) &&
                 cast<ConstantInt>(GEP->getOperand(1))->isZero()) {
        // We are stepping into an element, e.g. a structure or an array:
        //   GEP Ptr, int 0, uint C
        const Type *AggTy = PTy->getElementType();
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();

        if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) {
          if (Idx >= ATy->getNumElements()) return 0;  // Out of range.
        } else if (const VectorType *VectorTy = dyn_cast<VectorType>(AggTy)) {
          // Getting an element of the packed vector.
          if (Idx >= VectorTy->getNumElements()) return 0;  // Out of range.

          // Merge in the vector type.
          if (MergeInType(VectorTy, UsedType, TD)) return 0;

          const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
          if (SubTy == 0) return 0;

          if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
            return 0;

          // We'll need to change this to an insert/extract element operation.
          IsNotTrivial = true;
          continue;  // Everything looks ok

        } else if (isa<StructType>(AggTy)) {
          // Structs are always ok.
        } else {
          return 0;
        }
        const Type *NTy = getUIntAtLeastAsBitAs(TD.getTypeSize(AggTy)*8);
        if (NTy == 0 || MergeInType(NTy, UsedType, TD)) return 0;
        const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubTy == 0) return 0;
        if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
          return 0;
        continue;  // Everything looks ok
      }
      return 0;
    } else {
      // Cannot handle this!
      return 0;
    }
  }

  return UsedType;
}
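
// Editor's illustrative example: for a union accessed as both i32 and float,
//
//   %U = alloca i32
//   %F = bitcast i32* %U to float*
//
// CanConvertToScalar merges the i32 and float uses into a single i32, and
// ConvertUsesToScalar below bitcasts the float loads and stores to and from
// i32.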

/// ConvertToScalar - The specified alloca passes the CanConvertToScalar
/// predicate and is non-trivial.  Convert it to something that can be
/// trivially promoted into a register by mem2reg.
void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
  DOUT << "CONVERT TO SCALAR: " << *AI << "  TYPE = "
       << *ActualTy << "\n";
  ++NumConverted;

  BasicBlock *EntryBlock = AI->getParent();
  assert(EntryBlock == &EntryBlock->getParent()->getEntryBlock() &&
         "Not in the entry block!");
  EntryBlock->getInstList().remove(AI);  // Take the alloca out of the program.

  // Create and insert the alloca.
  AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(),
                                     EntryBlock->begin());
  ConvertUsesToScalar(AI, NewAI, 0);
  delete AI;
}


/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new
/// alloca directly.  This happens when we are converting an "integer union"
/// to a single integer scalar, or when we are converting a "vector union" to
/// a vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  const TargetData &TD = getAnalysis<TargetData>();
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *NV = new LoadInst(NewAI, LI->getName(), LI);
      if (NV->getType() == LI->getType()) {
        // We win, no conversion needed.
      } else if (const VectorType *PTy = dyn_cast<VectorType>(NV->getType())) {
        // If the result alloca is a vector type, this is either an element
        // access or a bitcast to another vector type.
        if (isa<VectorType>(LI->getType())) {
          NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
        } else {
          // Must be an element access.
          unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
          NV = new ExtractElementInst(
                         NV, ConstantInt::get(Type::Int32Ty, Elt), "tmp", LI);
        }
      } else if (isa<PointerType>(NV->getType())) {
        assert(isa<PointerType>(LI->getType()));
        // Must be a ptr->ptr cast.  Anything else would result in NV being
        // an integer.
        NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
      } else {
        const IntegerType *NTy = cast<IntegerType>(NV->getType());
        unsigned LIBitWidth = TD.getTypeSizeInBits(LI->getType());

        // If this is a big-endian system and the load is narrower than the
        // full alloca type, we need to do a shift to get the right bits.
        int ShAmt = 0;
        if (TD.isBigEndian()) {
          ShAmt = NTy->getBitWidth()-LIBitWidth-Offset;
        } else {
          ShAmt = Offset;
        }
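        // Editor's worked example (illustrative): loading an i8 at bit
        // offset 0 from an i32 alloca on a big-endian target gives
        // ShAmt = 32 - 8 - 0 = 24, so the wanted byte is shifted down from
        // the top of the integer; on a little-endian target ShAmt is simply
        // Offset.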

        // Note: we support negative shift amounts (with shl), which are not
        // formally defined.  We do this to support (e.g.) loads off the end
        // of a structure where only some bits are used.
        if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
          NV = BinaryOperator::createLShr(NV,
                                        ConstantInt::get(NV->getType(), ShAmt),
                                          LI->getName(), LI);
        else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
          NV = BinaryOperator::createShl(NV,
                                       ConstantInt::get(NV->getType(), -ShAmt),
                                         LI->getName(), LI);

        // Finally, unconditionally truncate the integer to the right width.
        if (LIBitWidth < NTy->getBitWidth())
          NV = new TruncInst(NV, IntegerType::get(LIBitWidth),
                             LI->getName(), LI);

        // If the result is an integer, this is a trunc or bitcast.
        if (isa<IntegerType>(LI->getType())) {
          assert(NV->getType() == LI->getType() && "Truncate wasn't enough?");
        } else if (LI->getType()->isFloatingPoint()) {
          // Just do a bitcast; we know the sizes match up.
          NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
        } else {
          // Otherwise must be a pointer.
          NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI);
        }
      }
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");

      // Convert the stored type to the actual type, shift it left to insert,
      // then 'or' into place.
      Value *SV = SI->getOperand(0);
      const Type *AllocaType = NewAI->getType()->getElementType();
      if (SV->getType() == AllocaType) {
        // All is well.
      } else if (const VectorType *PTy = dyn_cast<VectorType>(AllocaType)) {
        Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

        // If the result alloca is a vector type, this is either an element
        // access or a bitcast to another vector type.
        if (isa<VectorType>(SV->getType())) {
          SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
        } else {
          // Must be an element insertion.
          unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
          SV = new InsertElementInst(Old, SV,
                                     ConstantInt::get(Type::Int32Ty, Elt),
                                     "tmp", SI);
        }
      } else if (isa<PointerType>(AllocaType)) {
        // If the alloca type is a pointer, then all the elements must be
        // pointers.
        if (SV->getType() != AllocaType)
          SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
      } else {
        Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

        // If SV is a float, convert it to the appropriate integer type.
        // If it is a pointer, do the same, and also handle ptr->ptr casts
        // here.
        unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
        unsigned DestWidth = AllocaType->getPrimitiveSizeInBits();
        if (SV->getType()->isFloatingPoint())
          SV = new BitCastInst(SV, IntegerType::get(SrcWidth),
                               SV->getName(), SI);
        else if (isa<PointerType>(SV->getType()))
          SV = new PtrToIntInst(SV, TD.getIntPtrType(), SV->getName(), SI);

        // Always zero extend the value if needed.
        if (SV->getType() != AllocaType)
          SV = new ZExtInst(SV, AllocaType, SV->getName(), SI);

        // If this is a big-endian system and the store is narrower than the
        // full alloca type, we need to do a shift to get the right bits.
        int ShAmt = 0;
        if (TD.isBigEndian()) {
          ShAmt = DestWidth-SrcWidth-Offset;
        } else {
          ShAmt = Offset;
        }

        // Note: we support negative shift amounts (with shr), which are not
        // formally defined.  We do this to support (e.g.) stores off the end
        // of a structure where only some bits in the structure are set.
        APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
        if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
          SV = BinaryOperator::createShl(SV,
                                        ConstantInt::get(SV->getType(), ShAmt),
                                         SV->getName(), SI);
          Mask <<= ShAmt;
        } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
          SV = BinaryOperator::createLShr(SV,
                                       ConstantInt::get(SV->getType(), -ShAmt),
                                          SV->getName(), SI);
          Mask = Mask.lshr(-ShAmt);  // Note: -ShAmt is positive here.
        }

        // Mask out the bits we are about to insert from the old value, and
        // or in the new bits.
        if (SrcWidth != DestWidth) {
          assert(DestWidth > SrcWidth);
          Old = BinaryOperator::createAnd(Old, ConstantInt::get(~Mask),
                                          Old->getName()+".mask", SI);
          SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI);
        }
      }
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned AggSizeInBits = TD.getTypeSize(AggPtrTy->getElementType())*8;

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        NewOffset += BitOffset;
      } else if (GEP->getNumOperands() == 3) {
        // We know that the leading index (operand 1) is zero.
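        // Editor's illustrative note: for "getelementptr %P, i32 0, i32 1"
        // into a { i32, i32 } aggregate, the struct layout below yields an
        // element byte offset of 4, so 32 bits are added to NewOffset.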
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits = TD.getTypeSize(SeqTy->getElementType())*8;

          NewOffset += ElSizeBits*Idx;
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset =
            TD.getStructLayout(STy)->getElementOffset(Idx)*8;

          NewOffset += EltBitOffset;
        } else {
          assert(0 && "Unsupported operation!");
          abort();
        }
      } else {
        assert(0 && "Unsupported operation!");
        abort();
      }
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
      abort();
    }
  }
}


/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, and return false
/// if we see any stores or other unknown uses.  If we see pointer arithmetic,
/// keep track of whether it moves the pointer (with isOffset), but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an unoffseted
/// pointer to the alloca, and if the source pointer is a pointer to a
/// constant global, we can optimize this.
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (isa<LoadInst>(*UI)) {
      // Ignore loads, they are always ok.
      continue;
    }
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If
      // it doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                          isOffset ||
                                          !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI))
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}
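
// Editor's illustrative note: a GEP with all zero indices, e.g.
// "getelementptr %A, i32 0, i32 0", leaves isOffset false; any nonzero index
// sets it, which rejects the copy because the memcpy must target the very
// start of the alloca.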

/// isOnlyCopiedFromConstantGlobal - If the specified alloca is only modified
/// by a copy from a constant global, return that copy instruction; otherwise
/// return null.  If we can prove this, we can replace any uses of the alloca
/// with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}