ScalarReplAggregates.cpp revision 79b3bd395dc3303cde65e18e0524ed2f70268c99
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// the two often interact, especially for C++ programs.  As such, iterating
// between SRoA and Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

namespace {
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - We need dominator information and target data, and
    // we do not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<ETForest>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    int isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI);
    int isSafeUseOfAllocation(Instruction *User, AllocationInst *AI);
    bool isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI);
    bool isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI);
    int isSafeAllocaToScalarRepl(AllocationInst *AI);
    void DoScalarReplacement(AllocationInst *AI,
                             std::vector<AllocationInst*> &WorkList);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
    void ConvertToScalar(AllocationInst *AI, const Type *Ty);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
  };

  RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
}

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass() { return new SROA(); }


bool SROA::runOnFunction(Function &F) {
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  const TargetData &TD = getAnalysis<TargetData>();
  ETForest &ET = getAnalysis<ETForest>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions
    // in the entry node.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI, TD))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, ET, DF, TD);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}

// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function,
// removing them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any allocas and mallocs to the
  // worklist.
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

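    // For instance (hypothetical IR, names invented): given
    //   %U = alloca float
    //   %P = bitcast float* %U to i32*
    // where memory is only accessed through loads and stores of %U and %P,
    // the check below turns %U into a single "alloca i32" and rewrites the
    // float accesses with bitcasts (see ConvertToScalar and MergeInType).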
    // If we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register, do so now.
    bool IsNotTrivial = false;
    if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial))
      if (IsNotTrivial && ActualType != Type::VoidTy) {
        ConvertToScalar(AI, ActualType);
        Changed = true;
        continue;
      }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    if (!AI->isArrayAllocation() &&
        (isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType()))) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: assert(0 && "Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CanonicalizeAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global.  If this is the case, we can change all users
    // to use the constant global instead.  This is commonly produced by the
    // CFE by constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }"
    // if 'A' is only subsequently read.
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DOUT << "Found alloca equal to global: " << *AI;
      DOUT << "  memcpy = " << *TheCopy;
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this.
  }

  return Changed;
}

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate; do SROA now.
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DOUT << "Found inst to SROA: " << *AI;
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

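  // For example (hypothetical IR), "%X = alloca {i32, float}" yields
  // "%X.0 = alloca i32" and "%X.1 = alloca float", and each
  // "getelementptr %X, 0, <cst>" is then pointed at the matching new alloca.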
  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
  //
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
       (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelementptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      //
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = new GetElementPtrInst(AllocaToUse, &NewArgs[0],
                                       NewArgs.size(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0) {
      bool IsStartOfAggregateGEP = true;
      for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) {
        if (!isa<ConstantInt>(GEPI->getOperand(i))) {
          IsStartOfAggregateGEP = false;
          break;
        }
        if (!cast<ConstantInt>(GEPI->getOperand(i))->isZero()) {
          IsStartOfAggregateGEP = false;
          break;
        }
      }

      if (IsStartOfAggregateGEP)
        RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);
    }

    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  NumReplaced++;
}

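// For illustration, uses that the checks below reject include storing the
// element pointer itself (e.g. "store i32* %elt, i32** %p") and GEPs whose
// first index is non-zero, i.e. pointer arithmetic off the element.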
/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
///
int SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return 0;
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1) {
        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
          return 0;  // Using pointer arithmetic to navigate the array.

        if (AreAllZeroIndices) {
          for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) {
            if (!isa<ConstantInt>(GEP->getOperand(i)) ||
                !cast<ConstantInt>(GEP->getOperand(i))->isZero()) {
              AreAllZeroIndices = false;
              break;
            }
          }
        }
      }
      if (!isSafeElementUse(GEP, AreAllZeroIndices, AI)) return 0;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt &&
          isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI))
        break;
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt && isSafeMemIntrinsicOnAllocation(MI, AI))
          break;
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    default:
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    }
  }
  return 3;  // All users look ok :)
}

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}

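// For example (hypothetical IR), with "%A = alloca [2 x i32]" a GEP such as
// "getelementptr %A, i32 0, i32 %i" with a variable index can still be
// handled when all of its users are loads: isSafeUseOfAllocation returns 1,
// and CanonicalizeAllocaUsers later rewrites each load as a select between
// loads of element 0 and element 1.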
/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
int SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI) {
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI) ? 3 : 0;
  if (!isa<GetElementPtrInst>(User)) return 0;

  GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType()))
    return 0;

  ++I;
  if (I == E) return 0;  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If this is a use of an array allocation, do a bit more checking for
  // sanity.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    uint64_t NumElements = AT->getNumElements();

    if (ConstantInt *Idx = dyn_cast<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices &= Idx->isZero();

      // Check to make sure that index falls within the array.  If not,
      // something funny is going on, so we won't do the optimization.
      //
      if (Idx->getZExtValue() >= NumElements)
        return 0;

      // We cannot scalar repl this level of the array unless any array
      // sub-indices are in-range constants.  In particular, consider:
      // A[0][i].  We cannot know that the user isn't doing invalid things like
      // allowing i to index an out-of-range subscript that accesses A[1].
      //
      // Scalar replacing *just* the outer index of the array is probably not
      // going to be a win anyway, so just give up.
      for (++I; I != E && (isa<ArrayType>(*I) || isa<VectorType>(*I)); ++I) {
        uint64_t NumElements;
        if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*I))
          NumElements = SubArrayTy->getNumElements();
        else
          NumElements = cast<VectorType>(*I)->getNumElements();

        ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
        if (!IdxVal) return 0;
        if (IdxVal->getZExtValue() >= NumElements)
          return 0;
        IsAllZeroIndices &= IdxVal->isZero();
      }

    } else {
      IsAllZeroIndices = false;

      // If this is an array index and the index is not constant, we cannot
      // promote... that is, unless the array has exactly one or two elements
      // in it, in which case we CAN promote it, but we have to canonicalize
      // this out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI))
        return 1;  // Canonicalization required!
      return 0;
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to
  // reject them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI);
}

/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory
/// intrinsic can be promoted by SROA.  At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
bool SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI,
                                          AllocationInst *AI) {
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return false;

  // If not the whole aggregate, give up.
  const TargetData &TD = getAnalysis<TargetData>();
  if (Length->getZExtValue() !=
      TD.getTypeSize(AI->getType()->getElementType()))
    return false;

  // We only know about memcpy/memset/memmove.
  if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
    return false;
  // Otherwise, we can transform it.
  return true;
}

/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
/// are safe: that is, nested bitcasts whose own uses are safe, or memory
/// intrinsics that isSafeMemIntrinsicOnAllocation accepts.
bool SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      if (!isSafeUseOfBitCastedAllocation(BCU, AI))
        return false;
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      if (!isSafeMemIntrinsicOnAllocation(MI, AI))
        return false;
    } else {
      return false;
    }
  }
  return true;
}

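// For example (hypothetical IR), a call such as
//   call void @llvm.memset.i32(i8* %cast, i8 0, i32 8, i32 0)
// covering all of "%X = alloca {i32, i32}" is rewritten below into one
// store of "i32 0" to each of the element allocas %X.0 and %X.1.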
/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Constant *Zero = Constant::getNullValue(Type::Int32Ty);
  const TargetData &TD = getAnalysis<TargetData>();

  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(*UI)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      ++UI;
      BCU->eraseFromParent();
      continue;
    }

    // Otherwise, must be memcpy/memmove/memset of the entire aggregate.
    // Split into one per element.
    MemIntrinsic *MI = dyn_cast<MemIntrinsic>(*UI);

    // If it's not a mem intrinsic, it must be some other user of a gep of the
    // first pointer.  Just leave these alone.
    if (!MI) {
      ++UI;
      continue;
    }

    // If this is a memcpy/memmove, construct the other pointer as the
    // appropriate type.
    Value *OtherPtr = 0;
    if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
      if (BCInst == MCI->getRawDest())
        OtherPtr = MCI->getRawSource();
      else {
        assert(BCInst == MCI->getRawSource());
        OtherPtr = MCI->getRawDest();
      }
    } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (BCInst == MMI->getRawDest())
        OtherPtr = MMI->getRawSource();
      else {
        assert(BCInst == MMI->getRawSource());
        OtherPtr = MMI->getRawDest();
      }
    }

    // If there is an other pointer, we want to convert it to the same pointer
    // type as AI has, so we can GEP through it.
    if (OtherPtr) {
      // It is likely that OtherPtr is a bitcast; if so, remove it.
      if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
        OtherPtr = BC->getOperand(0);
      if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
        if (BCE->getOpcode() == Instruction::BitCast)
          OtherPtr = BCE->getOperand(0);

      // If the pointer is not the right type, insert a bitcast to the right
      // type.
      if (OtherPtr->getType() != AI->getType())
        OtherPtr = new BitCastInst(OtherPtr, AI->getType(),
                                   OtherPtr->getName(), MI);
    }

    // Process each element of the aggregate.
    Value *TheFn = MI->getOperand(0);
    const Type *BytePtrTy = MI->getRawDest()->getType();
    bool SROADest = MI->getRawDest() == BCInst;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // If this is a memcpy/memmove, emit a GEP of the other element address.
      Value *OtherElt = 0;
      if (OtherPtr) {
        OtherElt = new GetElementPtrInst(OtherPtr, Zero,
                                         ConstantInt::get(Type::Int32Ty, i),
                                         OtherPtr->getNameStr()+"."+utostr(i),
                                         MI);
      }

      Value *EltPtr = NewElts[i];
      const Type *EltTy =
        cast<PointerType>(EltPtr->getType())->getElementType();

      // If we got down to a scalar, insert a load or store as appropriate.
      if (EltTy->isFirstClassType()) {
        if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
          Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp", MI);
          new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
          continue;
        } else {
          assert(isa<MemSetInst>(MI));

          // If the stored element is zero (common case), just store a null
          // constant.
          Constant *StoreVal;
          if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
            if (CI->isZero()) {
              StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
            } else {
              // If EltTy is a packed type, get the element type.
              const Type *ValTy = EltTy;
              if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
                ValTy = VTy->getElementType();

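              // Replicate the memset byte across the element; for example, a
              // memset value of 1 filling a 4-byte element produces the i32
              // constant 0x01010101.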
              // Construct an integer with the right value.
              unsigned EltSize = TD.getTypeSize(ValTy);
              APInt OneVal(EltSize*8, CI->getZExtValue());
              APInt TotalVal(OneVal);
              // Set each byte.
              for (unsigned i = 0; i != EltSize-1; ++i) {
                TotalVal = TotalVal.shl(8);
                TotalVal |= OneVal;
              }

              // Convert the integer value to the appropriate type.
              StoreVal = ConstantInt::get(TotalVal);
              if (isa<PointerType>(ValTy))
                StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
              else if (ValTy->isFloatingPoint())
                StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
              assert(StoreVal->getType() == ValTy && "Type mismatch!");

              // If the requested value was a vector constant, create it.
              if (EltTy != ValTy) {
                unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
                SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
                StoreVal = ConstantVector::get(&Elts[0], NumElts);
              }
            }
            new StoreInst(StoreVal, EltPtr, MI);
            continue;
          }
          // Otherwise, if we're storing a byte variable, use a memset call
          // for this element.
        }
      }

      // Cast the element pointer to BytePtrTy.
      if (EltPtr->getType() != BytePtrTy)
        EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

      // Cast the other pointer (if we have one) to BytePtrTy.
      if (OtherElt && OtherElt->getType() != BytePtrTy)
        OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                   MI);

      unsigned EltSize = TD.getTypeSize(EltTy);

      // Finally, insert the meminst for this element.
      if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
        Value *Ops[] = {
          SROADest ? EltPtr : OtherElt,  // Dest ptr
          SROADest ? OtherElt : EltPtr,  // Src ptr
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          Zero  // Align
        };
        new CallInst(TheFn, Ops, 4, "", MI);
      } else {
        assert(isa<MemSetInst>(MI));
        Value *Ops[] = {
          EltPtr, MI->getOperand(2),  // Dest, Value,
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          Zero  // Align
        };
        new CallInst(TheFn, Ops, 4, "", MI);
      }
    }

    // Finally, MI is now dead, as we've modified its actions to occur on all
    // of the elements of the aggregate.
    ++UI;
    MI->eraseFromParent();
  }
}

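// Note on the return values used below: the isSafe* predicates return 0
// (unsafe), 1 (safe after canonicalization), or 3 (safe as-is) so that
// results combine with '&': any 0 poisons the result, and 3 & 1 == 1
// preserves the cleanup requirement.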
/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all
  // of the users are safe to transform.
  //
  int isSafe = 3;
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafe &= isSafeUseOfAllocation(cast<Instruction>(*I), AI);
    if (isSafe == 0) {
      DOUT << "Cannot transform: " << *AI << "  due to user: " << **I;
      return 0;
    }
  }
  // If we require cleanup, isSafe is now 1, otherwise it is 3.
  return isSafe;
}

/// CanonicalizeAllocaUsers - If SROA reported that it can promote the
/// specified allocation, but only if cleaned up, perform the cleanups
/// required.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted,
  // so we can insert ugly code if required so long as sroa+mem2reg will clean
  // it up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI++);
    if (!GEPI) continue;
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP,
          // insert two loads of the appropriate indexed GEP and select
          // between them.
          Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                              "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::Int32Ty);
          Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                 &Indices[0], Indices.size(),
                                                 GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
          Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                &Indices[0], Indices.size(),
                                                GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}

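// For example, MergeInType below merges i16 with i32 to i32 (case 1),
// <4 x float> with float to <4 x float> (case 2), and float with a pointer
// (on a 32-bit target, where pointers become i32) to i32 (case 3).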
/// MergeInType - Add the 'In' type to the accumulated type so far.  If the
/// types are incompatible, return true, otherwise update Accum and return
/// false.
///
/// There are three cases we handle here:
///   1) An effectively-integer union, where the pieces are stored into as
///      smaller integers (common with byte swap and other idioms).
///   2) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///   3) A union of scalar types, such as int/float or int/pointer.  Here we
///      merge together into integers, allowing the xform to work with #1 as
///      well.
static bool MergeInType(const Type *In, const Type *&Accum,
                        const TargetData &TD) {
  // If this is our first type, just use it.
  const VectorType *PTy;
  if (Accum == Type::VoidTy || In == Accum) {
    Accum = In;
  } else if (In == Type::VoidTy) {
    // Noop.
  } else if (In->isInteger() && Accum->isInteger()) {   // integer union.
    // Otherwise pick whichever type is larger.
    if (cast<IntegerType>(In)->getBitWidth() >
        cast<IntegerType>(Accum)->getBitWidth())
      Accum = In;
  } else if (isa<PointerType>(In) && isa<PointerType>(Accum)) {
    // Pointer unions just stay as one of the pointers.
  } else if (isa<VectorType>(In) || isa<VectorType>(Accum)) {
    if ((PTy = dyn_cast<VectorType>(Accum)) &&
        PTy->getElementType() == In) {
      // Accum is a vector, and we are accessing an element: ok.
    } else if ((PTy = dyn_cast<VectorType>(In)) &&
               PTy->getElementType() == Accum) {
      // In is a vector, and Accum is an element: ok, remember In.
      Accum = In;
    } else if ((PTy = dyn_cast<VectorType>(In)) && isa<VectorType>(Accum) &&
               PTy->getBitWidth() == cast<VectorType>(Accum)->getBitWidth()) {
      // Two vectors of the same size: keep Accum.
    } else {
      // Cannot insert a short into a <4 x int> or handle
      // <2 x int> -> <4 x int>.
      return true;
    }
  } else {
    // Pointer/FP/Integer unions merge together as integers.
    switch (Accum->getTypeID()) {
    case Type::PointerTyID: Accum = TD.getIntPtrType(); break;
    case Type::FloatTyID:   Accum = Type::Int32Ty; break;
    case Type::DoubleTyID:  Accum = Type::Int64Ty; break;
    default:
      assert(Accum->isInteger() && "Unknown FP type!");
      break;
    }

    switch (In->getTypeID()) {
    case Type::PointerTyID: In = TD.getIntPtrType(); break;
    case Type::FloatTyID:   In = Type::Int32Ty; break;
    case Type::DoubleTyID:  In = Type::Int64Ty; break;
    default:
      assert(In->isInteger() && "Unknown FP type!");
      break;
    }
    return MergeInType(In, Accum, TD);
  }
  return false;
}

/// getUIntAtLeastAsBitAs - Return an unsigned integer type that is at least
/// as big as the specified number of bits.  If there is no suitable type,
/// this returns null.
const Type *getUIntAtLeastAsBitAs(unsigned NumBits) {
  if (NumBits > 64) return 0;
  if (NumBits > 32) return Type::Int64Ty;
  if (NumBits > 16) return Type::Int32Ty;
  if (NumBits > 8) return Type::Int16Ty;
  return Type::Int8Ty;
}

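// An illustrative case (hypothetical IR): for "%X = alloca <4 x float>"
// whose only non-trivial uses are "getelementptr %X, i32 0, i32 <cst>"
// element loads and stores, CanConvertToScalar returns <4 x float>, and the
// element accesses later become extractelement/insertelement instructions.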
/// CanConvertToScalar - V is a pointer.  If we can convert the pointee to a
/// single scalar integer type, return that type.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.  If
/// there are no uses of this pointer, return Type::VoidTy to differentiate
/// from failure.
///
const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
  const Type *UsedType = Type::VoidTy;  // No uses, no forced type.
  const TargetData &TD = getAnalysis<TargetData>();
  const PointerType *PTy = cast<PointerType>(V->getType());

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (MergeInType(LI->getType(), UsedType, TD))
        return 0;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V) return 0;

      // NOTE: We could handle storing of FP imms into integers here!

      if (MergeInType(SI->getOperand(0)->getType(), UsedType, TD))
        return 0;
    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      IsNotTrivial = true;
      const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial);
      if (!SubTy || MergeInType(SubTy, UsedType, TD)) return 0;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Check to see if this is stepping over an element: GEP Ptr, int C
      if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned ElSize = TD.getTypeSize(PTy->getElementType());
        unsigned BitOffset = Idx*ElSize*8;
        if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;

        IsNotTrivial = true;
        const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubElt == 0) return 0;
        if (SubElt != Type::VoidTy && SubElt->isInteger()) {
          const Type *NewTy =
            getUIntAtLeastAsBitAs(TD.getTypeSize(SubElt)*8+BitOffset);
          if (NewTy == 0 || MergeInType(NewTy, UsedType, TD)) return 0;
          continue;
        }
      } else if (GEP->getNumOperands() == 3 &&
                 isa<ConstantInt>(GEP->getOperand(1)) &&
                 isa<ConstantInt>(GEP->getOperand(2)) &&
                 cast<ConstantInt>(GEP->getOperand(1))->isZero()) {
        // We are stepping into an element, e.g. a structure or an array:
        //   GEP Ptr, int 0, uint C
        const Type *AggTy = PTy->getElementType();
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();

        if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) {
          if (Idx >= ATy->getNumElements()) return 0;  // Out of range.
        } else if (const VectorType *VectorTy = dyn_cast<VectorType>(AggTy)) {
          // Getting an element of the packed vector.
          if (Idx >= VectorTy->getNumElements()) return 0;  // Out of range.

          // Merge in the vector type.
          if (MergeInType(VectorTy, UsedType, TD)) return 0;

          const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
          if (SubTy == 0) return 0;

          if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
            return 0;

          // We'll need to change this to an insert/extract element operation.
          IsNotTrivial = true;
          continue;  // Everything looks ok

        } else if (isa<StructType>(AggTy)) {
          // Structs are always ok.
        } else {
          return 0;
        }
        const Type *NTy = getUIntAtLeastAsBitAs(TD.getTypeSize(AggTy)*8);
        if (NTy == 0 || MergeInType(NTy, UsedType, TD)) return 0;
        const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubTy == 0) return 0;
        if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
          return 0;
        continue;  // Everything looks ok
      }
      return 0;
    } else {
      // Cannot handle this!
      return 0;
    }
  }

  return UsedType;
}

/// ConvertToScalar - The specified alloca passes the CanConvertToScalar
/// predicate and is non-trivial.  Convert it to something that can be
/// trivially promoted into a register by mem2reg.
void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
  DOUT << "CONVERT TO SCALAR: " << *AI << "  TYPE = "
       << *ActualTy << "\n";
  ++NumConverted;

  BasicBlock *EntryBlock = AI->getParent();
  assert(EntryBlock == &EntryBlock->getParent()->getEntryBlock() &&
         "Not in the entry block!");
  EntryBlock->getInstList().remove(AI);  // Take the alloca out of the program.

  // Create and insert the alloca.
  AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(),
                                     EntryBlock->begin());
  ConvertUsesToScalar(AI, NewAI, 0);
  delete AI;
}

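// E.g. (hypothetical, little-endian target): if an i32 alloca was merged to
// a single i32 scalar and a use loads the upper i16 (Offset == 16), the
// rewrite below produces "load i32; lshr by 16; trunc to i16".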
/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new
/// alloca directly.  This happens when we are converting an "integer union"
/// to a single integer scalar, or when we are converting a "vector union" to
/// a vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  const TargetData &TD = getAnalysis<TargetData>();
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *NV = new LoadInst(NewAI, LI->getName(), LI);
      if (NV->getType() == LI->getType()) {
        // We win, no conversion needed.
      } else if (const VectorType *PTy = dyn_cast<VectorType>(NV->getType())) {
        // If the result alloca is a vector type, this is either an element
        // access or a bitcast to another vector type.
        if (isa<VectorType>(LI->getType())) {
          NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
        } else {
          // Must be an element access.
          unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
          NV = new ExtractElementInst(
                 NV, ConstantInt::get(Type::Int32Ty, Elt), "tmp", LI);
        }
      } else if (isa<PointerType>(NV->getType())) {
        assert(isa<PointerType>(LI->getType()));
        // Must be ptr->ptr cast.  Anything else would result in NV being
        // an integer.
        NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
      } else {
        const IntegerType *NTy = cast<IntegerType>(NV->getType());
        unsigned LIBitWidth = TD.getTypeSizeInBits(LI->getType());

        // If this is a big-endian system and the load is narrower than the
        // full alloca type, we need to do a shift to get the right bits.
        int ShAmt = 0;
        if (TD.isBigEndian()) {
          ShAmt = NTy->getBitWidth()-LIBitWidth-Offset;
        } else {
          ShAmt = Offset;
        }

        // Note: we support negative shift amounts (with shl), which are not
        // defined for the shift instructions themselves.  We do this to
        // support (f.e.) loads off the end of a structure where only some
        // bits are used.
        if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
          NV = BinaryOperator::createLShr(NV,
                                         ConstantInt::get(NV->getType(), ShAmt),
                                         LI->getName(), LI);
        else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
          NV = BinaryOperator::createShl(NV,
                                        ConstantInt::get(NV->getType(), -ShAmt),
                                        LI->getName(), LI);

        // Finally, unconditionally truncate the integer to the right width.
        if (LIBitWidth < NTy->getBitWidth())
          NV = new TruncInst(NV, IntegerType::get(LIBitWidth),
                             LI->getName(), LI);

        // If the result is an integer, this is a trunc or bitcast.
        if (isa<IntegerType>(LI->getType())) {
          assert(NV->getType() == LI->getType() && "Truncate wasn't enough?");
        } else if (LI->getType()->isFloatingPoint()) {
          // Just do a bitcast; we know the sizes match up.
          NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
        } else {
          // Otherwise must be a pointer.
          NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI);
        }
      }
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");

      // Convert the stored type to the actual type, shift it left to insert,
      // then 'or' into place.
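      // Worked example (hypothetical, little-endian target): storing an i16
      // at Offset 16 into an i32 alloca becomes "zext to i32; shl by 16;
      // mask the old value with 0x0000FFFF; or in the new bits; store i32".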
      Value *SV = SI->getOperand(0);
      const Type *AllocaType = NewAI->getType()->getElementType();
      if (SV->getType() == AllocaType) {
        // All is well.
      } else if (const VectorType *PTy = dyn_cast<VectorType>(AllocaType)) {
        Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

        // If the result alloca is a vector type, this is either an element
        // access or a bitcast to another vector type.
        if (isa<VectorType>(SV->getType())) {
          SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
        } else {
          // Must be an element insertion.
          unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
          SV = new InsertElementInst(Old, SV,
                                     ConstantInt::get(Type::Int32Ty, Elt),
                                     "tmp", SI);
        }
      } else if (isa<PointerType>(AllocaType)) {
        // If the alloca type is a pointer, then all the elements must be
        // pointers.
        if (SV->getType() != AllocaType)
          SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
      } else {
        Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

        // If SV is a float, convert it to the appropriate integer type.
        // If it is a pointer, do the same, and also handle ptr->ptr casts
        // here.
        unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
        unsigned DestWidth = AllocaType->getPrimitiveSizeInBits();
        if (SV->getType()->isFloatingPoint())
          SV = new BitCastInst(SV, IntegerType::get(SrcWidth),
                               SV->getName(), SI);
        else if (isa<PointerType>(SV->getType()))
          SV = new PtrToIntInst(SV, TD.getIntPtrType(), SV->getName(), SI);

        // Always zero extend the value if needed.
        if (SV->getType() != AllocaType)
          SV = new ZExtInst(SV, AllocaType, SV->getName(), SI);

        // If this is a big-endian system and the store is narrower than the
        // full alloca type, we need to do a shift to get the right bits.
        int ShAmt = 0;
        if (TD.isBigEndian()) {
          ShAmt = DestWidth-SrcWidth-Offset;
        } else {
          ShAmt = Offset;
        }

        // Note: we support negative shift amounts (with shr), which are not
        // defined for the shift instructions themselves.  We do this to
        // support (f.e.) stores off the end of a structure where only some
        // bits in the structure are set.
        APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
        if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
          SV = BinaryOperator::createShl(SV,
                                         ConstantInt::get(SV->getType(), ShAmt),
                                         SV->getName(), SI);
          Mask <<= ShAmt;
        } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
          SV = BinaryOperator::createLShr(SV,
                                        ConstantInt::get(SV->getType(), -ShAmt),
                                        SV->getName(), SI);
          Mask = Mask.lshr(-ShAmt);
        }

        // Mask out the bits we are about to insert from the old value, and
        // or in the new bits.
        if (SrcWidth != DestWidth) {
          assert(DestWidth > SrcWidth);
          Old = BinaryOperator::createAnd(Old, ConstantInt::get(~Mask),
                                          Old->getName()+".mask", SI);
          SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI);
        }
      }
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned AggSizeInBits = TD.getTypeSize(AggPtrTy->getElementType())*8;

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        NewOffset += BitOffset;
      } else if (GEP->getNumOperands() == 3) {
        // We know that the first index is zero; CanConvertToScalar checked it.
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits = TD.getTypeSize(SeqTy->getElementType())*8;

          NewOffset += ElSizeBits*Idx;
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset =
            TD.getStructLayout(STy)->getElementOffset(Idx)*8;

          NewOffset += EltBitOffset;
        } else {
          assert(0 && "Unsupported operation!");
          abort();
        }
      } else {
        assert(0 && "Unsupported operation!");
        abort();
      }
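      // At this point NewOffset is Offset plus the bit offset of the element
      // this GEP selects; e.g. (hypothetical) "getelementptr %A, i32 0, i32 1"
      // on an alloca of type {i32, i32} adds 32 bits via the struct layout.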
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
      abort();
    }
  }
}


/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}

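// Note: for the memcpy/memmove intrinsic calls handled here, operand 0 is
// the callee, operand 1 the destination pointer, and operand 2 the source
// pointer, which is what the getOperandNo() and getOperand(2) checks below
// rely on.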
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset) but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an unoffseted
/// pointer to the alloca, and if the source pointer is a pointer to a
/// constant global, we can optimize this.
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (isa<LoadInst>(*UI)) {
      // Ignore loads, they are always ok.
      continue;
    }
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If
      // it doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                         isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI))
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the copy instruction if the
/// specified alloca is only modified by a copy from a constant global, or
/// null otherwise.  If we can prove this, we can replace any uses of the
/// alloca with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}