ScalarReplAggregates.cpp revision 8bf991193245bb8b7e497e8c16545a206fbe5eef
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// they often interact, especially for C++ programs.  As such, iterating
// between SRoA and Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");

namespace {
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass requires dominance information and target
    // data, and we know it will not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    int isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI);
    int isSafeUseOfAllocation(Instruction *User, AllocationInst *AI);
    bool isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI);
    bool isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI);
    int isSafeAllocaToScalarRepl(AllocationInst *AI);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
    void ConvertToScalar(AllocationInst *AI, const Type *Ty);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
  };

  RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
}

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass() { return new SROA(); }


bool SROA::runOnFunction(Function &F) {
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  const TargetData &TD = getAnalysis<TargetData>();
  DominatorTree     &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions
    // in the entry node
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI, TD))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF, TD);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}

// performScalarRepl - This algorithm is a simple worklist-driven algorithm,
// which runs on all of the malloc/alloca instructions in the function,
// removing them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any allocas and mallocs to the worklist
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // Check to see if we can turn this aggregate value (potentially with
    // casts) into a simple scalar value that can be mem2reg'd into a register.
    bool IsNotTrivial = false;
    if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial))
      if (IsNotTrivial && ActualType != Type::VoidTy) {
        ConvertToScalar(AI, ActualType);
        Changed = true;
        continue;
      }

    // We cannot transform the allocation instruction if it is an array
    // allocation (allocations OF arrays are ok though), and an allocation of a
    // scalar value cannot be decomposed at all.
    //
    if (AI->isArrayAllocation() ||
        (!isa<StructType>(AI->getAllocatedType()) &&
         !isa<ArrayType>(AI->getAllocatedType()))) continue;

    // Check that all of the users of the allocation are capable of being
    // transformed.
    switch (isSafeAllocaToScalarRepl(AI)) {
    default: assert(0 && "Unexpected value!");
    case 0:  // Not safe to scalar replace.
      continue;
    case 1:  // Safe, but requires cleanup/canonicalizations first
      CanonicalizeAllocaUsers(AI);
      // FALL THROUGH to scalar replace the canonicalized allocation.
    case 3:  // Safe to scalar replace.
      break;
    }

    DOUT << "Found inst to xform: " << *AI;
    Changed = true;

    SmallVector<AllocaInst*, 32> ElementAllocas;
    if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
      ElementAllocas.reserve(ST->getNumContainedTypes());
      for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
        AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                        AI->getAlignment(),
                                        AI->getName() + "." + utostr(i), AI);
        ElementAllocas.push_back(NA);
        WorkList.push_back(NA);  // Add to worklist for recursive processing
      }
    } else {
      const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
      ElementAllocas.reserve(AT->getNumElements());
      const Type *ElTy = AT->getElementType();
      for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
        AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                        AI->getName() + "." + utostr(i), AI);
        ElementAllocas.push_back(NA);
        WorkList.push_back(NA);  // Add to worklist for recursive processing
      }
    }

    // Now that we have created the alloca instructions that we want to use,
    // expand the getelementptr instructions to use them.
    //
    while (!AI->use_empty()) {
      Instruction *User = cast<Instruction>(AI->use_back());
      if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
        RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
        BCInst->eraseFromParent();
        continue;
      }

      GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
      // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
      unsigned Idx =
         (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

      assert(Idx < ElementAllocas.size() && "Index out of range?");
      AllocaInst *AllocaToUse = ElementAllocas[Idx];

      Value *RepValue;
      if (GEPI->getNumOperands() == 3) {
        // Do not insert a new getelementptr instruction with zero indices,
        // only to have it optimized out later.
        RepValue = AllocaToUse;
      } else {
        // We are indexing deeply into the structure, so we still need a
        // getelementptr instruction to finish the indexing.  This may be
        // expanded itself once the worklist is rerun.
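        // For example (an illustrative sketch; the values %T and %P are
        // hypothetical, not taken from a test case):
        //   %T = alloca { i32, { i32, i32 } }
        //   %P = getelementptr { i32, { i32, i32 } }* %T, i32 0, i32 1, i32 0
        // is rewritten against the element alloca %T.1 as:
        //   %P = getelementptr { i32, i32 }* %T.1, i32 0, i32 0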
        //
        SmallVector<Value*, 8> NewArgs;
        NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
        NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
        RepValue = new GetElementPtrInst(AllocaToUse, &NewArgs[0],
                                         NewArgs.size(), "", GEPI);
        RepValue->takeName(GEPI);
      }

      // If this GEP is to the start of the aggregate, check for memcpys.
      if (Idx == 0) {
        bool IsStartOfAggregateGEP = true;
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) {
          if (!isa<ConstantInt>(GEPI->getOperand(i))) {
            IsStartOfAggregateGEP = false;
            break;
          }
          if (!cast<ConstantInt>(GEPI->getOperand(i))->isZero()) {
            IsStartOfAggregateGEP = false;
            break;
          }
        }

        if (IsStartOfAggregateGEP)
          RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);
      }


      // Move all of the users over to the new GEP.
      GEPI->replaceAllUsesWith(RepValue);
      // Delete the old GEP
      GEPI->eraseFromParent();
    }

    // Finally, delete the Alloca instruction
    AI->eraseFromParent();
    NumReplaced++;
  }

  return Changed;
}


/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
///
int SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return 0;
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1) {
        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
          return 0;  // Using pointer arithmetic to navigate the array.

        if (AreAllZeroIndices) {
          for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) {
            if (!isa<ConstantInt>(GEP->getOperand(i)) ||
                !cast<ConstantInt>(GEP->getOperand(i))->isZero()) {
              AreAllZeroIndices = false;
              break;
            }
          }
        }
      }
      if (!isSafeElementUse(GEP, AreAllZeroIndices, AI)) return 0;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt &&
          isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI))
        break;
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt && isSafeMemIntrinsicOnAllocation(MI, AI))
          break;
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    default:
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    }
  }
  return 3;  // All users look ok :)
}

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
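/// For example (illustrative IR): "getelementptr %P, i32 0, i32 1" is a use
/// this accepts, while "getelementptr %P, i32 1", which uses pointer
/// arithmetic to step past the allocation, is rejected.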
349/// 350int SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI) { 351 if (BitCastInst *C = dyn_cast<BitCastInst>(User)) 352 return isSafeUseOfBitCastedAllocation(C, AI) ? 3 : 0; 353 if (!isa<GetElementPtrInst>(User)) return 0; 354 355 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User); 356 gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI); 357 358 // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>". 359 if (I == E || 360 I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) 361 return 0; 362 363 ++I; 364 if (I == E) return 0; // ran out of GEP indices?? 365 366 bool IsAllZeroIndices = true; 367 368 // If this is a use of an array allocation, do a bit more checking for sanity. 369 if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) { 370 uint64_t NumElements = AT->getNumElements(); 371 372 if (ConstantInt *Idx = dyn_cast<ConstantInt>(I.getOperand())) { 373 IsAllZeroIndices &= Idx->isZero(); 374 375 // Check to make sure that index falls within the array. If not, 376 // something funny is going on, so we won't do the optimization. 377 // 378 if (Idx->getZExtValue() >= NumElements) 379 return 0; 380 381 // We cannot scalar repl this level of the array unless any array 382 // sub-indices are in-range constants. In particular, consider: 383 // A[0][i]. We cannot know that the user isn't doing invalid things like 384 // allowing i to index an out-of-range subscript that accesses A[1]. 385 // 386 // Scalar replacing *just* the outer index of the array is probably not 387 // going to be a win anyway, so just give up. 388 for (++I; I != E && (isa<ArrayType>(*I) || isa<VectorType>(*I)); ++I) { 389 uint64_t NumElements; 390 if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*I)) 391 NumElements = SubArrayTy->getNumElements(); 392 else 393 NumElements = cast<VectorType>(*I)->getNumElements(); 394 395 ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand()); 396 if (!IdxVal) return 0; 397 if (IdxVal->getZExtValue() >= NumElements) 398 return 0; 399 IsAllZeroIndices &= IdxVal->isZero(); 400 } 401 402 } else { 403 IsAllZeroIndices = 0; 404 405 // If this is an array index and the index is not constant, we cannot 406 // promote... that is unless the array has exactly one or two elements in 407 // it, in which case we CAN promote it, but we have to canonicalize this 408 // out if this is the only problem. 409 if ((NumElements == 1 || NumElements == 2) && 410 AllUsersAreLoads(GEPI)) 411 return 1; // Canonicalization required! 412 return 0; 413 } 414 } 415 416 // If there are any non-simple uses of this getelementptr, make sure to reject 417 // them. 418 return isSafeElementUse(GEPI, IsAllZeroIndices, AI); 419} 420 421/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory 422/// intrinsic can be promoted by SROA. At this point, we know that the operand 423/// of the memintrinsic is a pointer to the beginning of the allocation. 424bool SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI){ 425 // If not constant length, give up. 426 ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); 427 if (!Length) return false; 428 429 // If not the whole aggregate, give up. 430 const TargetData &TD = getAnalysis<TargetData>(); 431 if (Length->getZExtValue() != TD.getTypeSize(AI->getType()->getElementType())) 432 return false; 433 434 // We only know about memcpy/memset/memmove. 
  if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
    return false;
  // Otherwise, we can transform it.
  return true;
}

/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
/// are safe to transform: each must be either another bitcast of the
/// allocation or a memory intrinsic that covers the entire aggregate.
bool SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      if (!isSafeUseOfBitCastedAllocation(BCU, AI))
        return false;
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      if (!isSafeMemIntrinsicOnAllocation(MI, AI))
        return false;
    } else {
      return false;
    }
  }
  return true;
}

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Constant *Zero = Constant::getNullValue(Type::Int32Ty);
  const TargetData &TD = getAnalysis<TargetData>();

  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(*UI)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      ++UI;
      BCU->eraseFromParent();
      continue;
    }

    // Otherwise, must be memcpy/memmove/memset of the entire aggregate.  Split
    // into one per element.
    MemIntrinsic *MI = dyn_cast<MemIntrinsic>(*UI);

    // If it's not a mem intrinsic, it must be some other user of a gep of the
    // first pointer.  Just leave these alone.
    if (!MI) {
      ++UI;
      continue;
    }

    // If this is a memcpy/memmove, construct the other pointer as the
    // appropriate type.
    Value *OtherPtr = 0;
    if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
      if (BCInst == MCI->getRawDest())
        OtherPtr = MCI->getRawSource();
      else {
        assert(BCInst == MCI->getRawSource());
        OtherPtr = MCI->getRawDest();
      }
    } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (BCInst == MMI->getRawDest())
        OtherPtr = MMI->getRawSource();
      else {
        assert(BCInst == MMI->getRawSource());
        OtherPtr = MMI->getRawDest();
      }
    }

    // If there is an other pointer, we want to convert it to the same pointer
    // type as AI has, so we can GEP through it.
    if (OtherPtr) {
      // It is likely that OtherPtr is a bitcast, if so, remove it.
      if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
        OtherPtr = BC->getOperand(0);
      if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
        if (BCE->getOpcode() == Instruction::BitCast)
          OtherPtr = BCE->getOperand(0);

      // If the pointer is not the right type, insert a bitcast to the right
      // type.
      if (OtherPtr->getType() != AI->getType())
        OtherPtr = new BitCastInst(OtherPtr, AI->getType(),
                                   OtherPtr->getName(), MI);
    }

    // Process each element of the aggregate.
    Value *TheFn = MI->getOperand(0);
    const Type *BytePtrTy = MI->getRawDest()->getType();
    bool SROADest = MI->getRawDest() == BCInst;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // If this is a memcpy/memmove, emit a GEP of the other element address.
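      // (Illustrative: a memcpy that covers a whole { i32, float } aggregate
      // is rewritten as an i32 load/store plus a float load/store, each one
      // going through the corresponding element alloca.)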
      Value *OtherElt = 0;
      if (OtherPtr) {
        OtherElt = new GetElementPtrInst(OtherPtr, Zero,
                                         ConstantInt::get(Type::Int32Ty, i),
                                         OtherPtr->getNameStr()+"."+utostr(i),
                                         MI);
      }

      Value *EltPtr = NewElts[i];
      const Type *EltTy =
        cast<PointerType>(EltPtr->getType())->getElementType();

      // If we got down to a scalar, insert a load or store as appropriate.
      if (EltTy->isFirstClassType()) {
        if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
          Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp",
                                    MI);
          new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
          continue;
        } else {
          assert(isa<MemSetInst>(MI));

          // If the stored element is zero (common case), just store a null
          // constant.
          Constant *StoreVal;
          if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
            if (CI->isZero()) {
              StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
            } else {
              // If EltTy is a vector type, get the element type.
              const Type *ValTy = EltTy;
              if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
                ValTy = VTy->getElementType();

              // Construct an integer with the right value.
              unsigned EltSize = TD.getTypeSize(ValTy);
              APInt OneVal(EltSize*8, CI->getZExtValue());
              APInt TotalVal(OneVal);
              // Set each byte.
              for (unsigned i = 0; i != EltSize-1; ++i) {
                TotalVal = TotalVal.shl(8);
                TotalVal |= OneVal;
              }

              // Convert the integer value to the appropriate type.
              StoreVal = ConstantInt::get(TotalVal);
              if (isa<PointerType>(ValTy))
                StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
              else if (ValTy->isFloatingPoint())
                StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
              assert(StoreVal->getType() == ValTy && "Type mismatch!");

              // If the requested value was a vector constant, create it.
              if (EltTy != ValTy) {
                unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
                SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
                StoreVal = ConstantVector::get(&Elts[0], NumElts);
              }
            }
            new StoreInst(StoreVal, EltPtr, MI);
            continue;
          }
          // Otherwise, if we're storing a byte variable, use a memset call for
          // this element.
        }
      }

      // Cast the element pointer to BytePtrTy.
      if (EltPtr->getType() != BytePtrTy)
        EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

      // Cast the other pointer (if we have one) to BytePtrTy.
      if (OtherElt && OtherElt->getType() != BytePtrTy)
        OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                   MI);

      unsigned EltSize = TD.getTypeSize(EltTy);

      // Finally, insert the meminst for this element.
      if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
        Value *Ops[] = {
          SROADest ? EltPtr : OtherElt,  // Dest ptr
          SROADest ? OtherElt : EltPtr,  // Src ptr
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          Zero  // Align
        };
        new CallInst(TheFn, Ops, 4, "", MI);
      } else {
        assert(isa<MemSetInst>(MI));
        Value *Ops[] = {
          EltPtr, MI->getOperand(2),  // Dest, Value,
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          Zero  // Align
        };
        new CallInst(TheFn, Ops, 4, "", MI);
      }
    }

    // Finally, MI is now dead, as we've modified its actions to occur on all
    // of the elements of the aggregate.
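    // Advance the use iterator before erasing MI: erasing MI removes this use
    // from BCInst's use list, which would otherwise invalidate UI.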
    ++UI;
    MI->eraseFromParent();
  }
}


/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  //
  int isSafe = 3;
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafe &= isSafeUseOfAllocation(cast<Instruction>(*I), AI);
    if (isSafe == 0) {
      DOUT << "Cannot transform: " << *AI << "  due to user: " << **I;
      return 0;
    }
  }
  // If we require cleanup, isSafe is now 1, otherwise it is 3.
  return isSafe;
}

/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(*UI++);
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP,
          // insert two loads of the appropriate indexed GEP and select
          // between them.
          Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                              "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::Int32Ty);
          Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                 &Indices[0], Indices.size(),
                                                 GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
          Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                &Indices[0], Indices.size(),
                                                GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}

/// MergeInType - Add the 'In' type to the accumulated type so far.  If the
/// types are incompatible, return true, otherwise update Accum and return
/// false.
///
/// There are three cases we handle here:
///   1) An effectively-integer union, where the pieces are stored into as
///      smaller integers (common with byte swap and other idioms).
///   2) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///   3) A union of scalar types, such as int/float or int/pointer.  Here we
///      merge together into integers, allowing the xform to work with #1 as
///      well.
static bool MergeInType(const Type *In, const Type *&Accum,
                        const TargetData &TD) {
  // If this is our first type, just use it.
  const VectorType *PTy;
  if (Accum == Type::VoidTy || In == Accum) {
    Accum = In;
  } else if (In == Type::VoidTy) {
    // Noop.
  } else if (In->isInteger() && Accum->isInteger()) {   // integer union.
    // Otherwise pick whichever type is larger.
    if (cast<IntegerType>(In)->getBitWidth() >
        cast<IntegerType>(Accum)->getBitWidth())
      Accum = In;
  } else if (isa<PointerType>(In) && isa<PointerType>(Accum)) {
    // Pointer unions just stay as one of the pointers.
  } else if (isa<VectorType>(In) || isa<VectorType>(Accum)) {
    if ((PTy = dyn_cast<VectorType>(Accum)) &&
        PTy->getElementType() == In) {
      // Accum is a vector, and we are accessing an element: ok.
    } else if ((PTy = dyn_cast<VectorType>(In)) &&
               PTy->getElementType() == Accum) {
      // In is a vector, and Accum is an element: ok, remember In.
      Accum = In;
    } else if ((PTy = dyn_cast<VectorType>(In)) && isa<VectorType>(Accum) &&
               PTy->getBitWidth() == cast<VectorType>(Accum)->getBitWidth()) {
      // Two vectors of the same size: keep Accum.
    } else {
      // Cannot insert a short into a <4 x int> or handle
      // <2 x int> -> <4 x int>
      return true;
    }
  } else {
    // Pointer/FP/Integer unions merge together as integers.
    switch (Accum->getTypeID()) {
    case Type::PointerTyID: Accum = TD.getIntPtrType(); break;
    case Type::FloatTyID:   Accum = Type::Int32Ty; break;
    case Type::DoubleTyID:  Accum = Type::Int64Ty; break;
    default:
      assert(Accum->isInteger() && "Unknown FP type!");
      break;
    }

    switch (In->getTypeID()) {
    case Type::PointerTyID: In = TD.getIntPtrType(); break;
    case Type::FloatTyID:   In = Type::Int32Ty; break;
    case Type::DoubleTyID:  In = Type::Int64Ty; break;
    default:
      assert(In->isInteger() && "Unknown FP type!");
      break;
    }
    return MergeInType(In, Accum, TD);
  }
  return false;
}

/// getUIntAtLeastAsBitAs - Return an unsigned integer type that is at least
/// as big as the specified number of bits.  If there is no suitable type,
/// this returns null.
const Type *getUIntAtLeastAsBitAs(unsigned NumBits) {
  if (NumBits > 64) return 0;
  if (NumBits > 32) return Type::Int64Ty;
  if (NumBits > 16) return Type::Int32Ty;
  if (NumBits > 8) return Type::Int16Ty;
  return Type::Int8Ty;
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee to a
/// single scalar integer type, return that type.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.  If
/// there are no uses of this pointer, return Type::VoidTy to differentiate
/// from failure.
///
const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
  const Type *UsedType = Type::VoidTy; // No uses, no forced type.
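  // (Illustrative example: an alloca of { i16, i16 } that is only loaded and
  // stored as an i32 through a bitcast converts to a single i32 scalar.)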
  const TargetData &TD = getAnalysis<TargetData>();
  const PointerType *PTy = cast<PointerType>(V->getType());

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (MergeInType(LI->getType(), UsedType, TD))
        return 0;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V) return 0;

      // NOTE: We could handle storing of FP imms into integers here!

      if (MergeInType(SI->getOperand(0)->getType(), UsedType, TD))
        return 0;
    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      IsNotTrivial = true;
      const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial);
      if (!SubTy || MergeInType(SubTy, UsedType, TD)) return 0;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Check to see if this is stepping over an element: GEP Ptr, int C
      if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned ElSize = TD.getTypeSize(PTy->getElementType());
        unsigned BitOffset = Idx*ElSize*8;
        if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;

        IsNotTrivial = true;
        const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubElt == 0) return 0;
        if (SubElt != Type::VoidTy && SubElt->isInteger()) {
          const Type *NewTy =
            getUIntAtLeastAsBitAs(TD.getTypeSize(SubElt)*8+BitOffset);
          if (NewTy == 0 || MergeInType(NewTy, UsedType, TD)) return 0;
          continue;
        }
      } else if (GEP->getNumOperands() == 3 &&
                 isa<ConstantInt>(GEP->getOperand(1)) &&
                 isa<ConstantInt>(GEP->getOperand(2)) &&
                 cast<Constant>(GEP->getOperand(1))->isNullValue()) {
        // We are stepping into an element, e.g. a structure or an array:
        // GEP Ptr, int 0, uint C
        const Type *AggTy = PTy->getElementType();
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();

        if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) {
          if (Idx >= ATy->getNumElements()) return 0;  // Out of range.
        } else if (const VectorType *VectorTy = dyn_cast<VectorType>(AggTy)) {
          // Getting an element of the packed vector.
          if (Idx >= VectorTy->getNumElements()) return 0;  // Out of range.

          // Merge in the vector type.
          if (MergeInType(VectorTy, UsedType, TD)) return 0;

          const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
          if (SubTy == 0) return 0;

          if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
            return 0;

          // We'll need to change this to an insert/extract element operation.
          IsNotTrivial = true;
          continue;    // Everything looks ok

        } else if (isa<StructType>(AggTy)) {
          // Structs are always ok.
        } else {
          return 0;
        }
        const Type *NTy = getUIntAtLeastAsBitAs(TD.getTypeSize(AggTy)*8);
        if (NTy == 0 || MergeInType(NTy, UsedType, TD)) return 0;
        const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubTy == 0) return 0;
        if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
          return 0;
        continue;    // Everything looks ok
      }
      return 0;
    } else {
      // Cannot handle this!
      return 0;
    }
  }

  return UsedType;
}

/// ConvertToScalar - The specified alloca passes the CanConvertToScalar
/// predicate and is non-trivial.  Convert it to something that can be
/// trivially promoted into a register by mem2reg.
void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
  DOUT << "CONVERT TO SCALAR: " << *AI << "  TYPE = "
       << *ActualTy << "\n";
  ++NumConverted;

  BasicBlock *EntryBlock = AI->getParent();
  assert(EntryBlock == &EntryBlock->getParent()->front() &&
         "Not in the entry block!");
  EntryBlock->getInstList().remove(AI);  // Take the alloca out of the program.

  // Create and insert the alloca.
  AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(),
                                     EntryBlock->begin());
  ConvertUsesToScalar(AI, NewAI, 0);
  delete AI;
}


/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits, that needs to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  bool isVectorInsert = isa<VectorType>(NewAI->getType()->getElementType());
  const TargetData &TD = getAnalysis<TargetData>();
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *NV = new LoadInst(NewAI, LI->getName(), LI);
      if (NV->getType() != LI->getType()) {
        if (const VectorType *PTy = dyn_cast<VectorType>(NV->getType())) {
          // If the result alloca is a vector type, this is either an element
          // access or a bitcast to another vector type.
          if (isa<VectorType>(LI->getType())) {
            NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
          } else {
            // Must be an element access.
            unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
            NV = new ExtractElementInst(
                           NV, ConstantInt::get(Type::Int32Ty, Elt), "tmp", LI);
          }
        } else if (isa<PointerType>(NV->getType())) {
          assert(isa<PointerType>(LI->getType()));
          // Must be ptr->ptr cast.  Anything else would result in NV being
          // an integer.
          NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
        } else {
          assert(NV->getType()->isInteger() && "Unknown promotion!");
          if (Offset && Offset < TD.getTypeSize(NV->getType())*8) {
            NV = BinaryOperator::createLShr(NV,
                                        ConstantInt::get(NV->getType(), Offset),
                                            LI->getName(), LI);
          }

          // If the result is an integer, this is a trunc or bitcast.
          if (LI->getType()->isInteger()) {
            NV = CastInst::createTruncOrBitCast(NV, LI->getType(),
                                                LI->getName(), LI);
          } else if (LI->getType()->isFloatingPoint()) {
            // If needed, truncate the integer to the appropriate size.
            if (NV->getType()->getPrimitiveSizeInBits() >
                LI->getType()->getPrimitiveSizeInBits()) {
              switch (LI->getType()->getTypeID()) {
              default: assert(0 && "Unknown FP type!");
              case Type::FloatTyID:
                NV = new TruncInst(NV, Type::Int32Ty, LI->getName(), LI);
                break;
              case Type::DoubleTyID:
                NV = new TruncInst(NV, Type::Int64Ty, LI->getName(), LI);
                break;
              }
            }

            // Then do a bitcast.
            NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
          } else {
            // Otherwise must be a pointer.
            NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI);
          }
        }
      }
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");

      // Convert the stored type to the actual type, shift it left to insert
      // it, then 'or' it into place.
      Value *SV = SI->getOperand(0);
      const Type *AllocaType = NewAI->getType()->getElementType();
      if (SV->getType() != AllocaType) {
        Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

        if (const VectorType *PTy = dyn_cast<VectorType>(AllocaType)) {
          // If the result alloca is a vector type, this is either an element
          // access or a bitcast to another vector type.
          if (isa<VectorType>(SV->getType())) {
            SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
          } else {
            // Must be an element insertion.
            unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
            SV = new InsertElementInst(Old, SV,
                                       ConstantInt::get(Type::Int32Ty, Elt),
                                       "tmp", SI);
          }
        } else {
          // If SV is a float, convert it to the appropriate integer type.
          // If it is a pointer, do the same, and also handle ptr->ptr casts
          // here.
          switch (SV->getType()->getTypeID()) {
          default:
            assert(!SV->getType()->isFloatingPoint() && "Unknown FP type!");
            break;
          case Type::FloatTyID:
            SV = new BitCastInst(SV, Type::Int32Ty, SV->getName(), SI);
            break;
          case Type::DoubleTyID:
            SV = new BitCastInst(SV, Type::Int64Ty, SV->getName(), SI);
            break;
          case Type::PointerTyID:
            if (isa<PointerType>(AllocaType))
              SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
            else
              SV = new PtrToIntInst(SV, TD.getIntPtrType(), SV->getName(), SI);
            break;
          }

          unsigned SrcSize = TD.getTypeSize(SV->getType())*8;

          // Always zero extend the value if needed.
          if (SV->getType() != AllocaType)
            SV = CastInst::createZExtOrBitCast(SV, AllocaType,
                                               SV->getName(), SI);
          if (Offset && Offset < AllocaType->getPrimitiveSizeInBits())
            SV = BinaryOperator::createShl(SV,
                                        ConstantInt::get(SV->getType(), Offset),
                                           SV->getName()+".adj", SI);
          // Mask out the bits we are about to insert from the old value.
          unsigned TotalBits = TD.getTypeSize(SV->getType())*8;
          if (TotalBits != SrcSize) {
            assert(TotalBits > SrcSize);
            uint64_t Mask = ~(((1ULL << SrcSize)-1) << Offset);
            Mask = Mask & cast<IntegerType>(SV->getType())->getBitMask();
            Old = BinaryOperator::createAnd(Old,
                                        ConstantInt::get(Old->getType(), Mask),
                                            Old->getName()+".mask", SI);
            SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI);
          }
        }
      }
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (CastInst *CI = dyn_cast<CastInst>(User)) {
      unsigned NewOff = Offset;
      const TargetData &TD = getAnalysis<TargetData>();
      if (TD.isBigEndian() && !isVectorInsert) {
        // Adjust the pointer.  For example, storing 16-bits into a 32-bit
        // alloca with just a cast makes it modify the top 16-bits.
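        // (Illustrative: on a big-endian target, an i16 store through a cast
        // of an i32 alloca lands in bits [16,32) of the i32, so the bit
        // offset below is advanced by 32-16 = 16.)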
        const Type *SrcTy = cast<PointerType>(Ptr->getType())->getElementType();
        const Type *DstTy = cast<PointerType>(CI->getType())->getElementType();
        int PtrDiffBits = TD.getTypeSize(SrcTy)*8 - TD.getTypeSize(DstTy)*8;
        NewOff += PtrDiffBits;
      }
      ConvertUsesToScalar(CI, NewAI, NewOff);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned AggSizeInBits = TD.getTypeSize(AggPtrTy->getElementType())*8;

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        if (TD.isLittleEndian() || isVectorInsert)
          NewOffset += BitOffset;
        else
          NewOffset -= BitOffset;

      } else if (GEP->getNumOperands() == 3) {
        // We know that the first index (operand #1) is zero.
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits = TD.getTypeSize(SeqTy->getElementType())*8;

          if (TD.isLittleEndian() || isVectorInsert)
            NewOffset += ElSizeBits*Idx;
          else
            NewOffset += AggSizeInBits-ElSizeBits*(Idx+1);
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset =
            TD.getStructLayout(STy)->getElementOffset(Idx)*8;

          if (TD.isLittleEndian() || isVectorInsert)
            NewOffset += EltBitOffset;
          else {
            const PointerType *ElPtrTy = cast<PointerType>(GEP->getType());
            unsigned ElSizeBits = TD.getTypeSize(ElPtrTy->getElementType())*8;
            NewOffset += AggSizeInBits-(EltBitOffset+ElSizeBits);
          }

        } else {
          assert(0 && "Unsupported operation!");
          abort();
        }
      } else {
        assert(0 && "Unsupported operation!");
        abort();
      }
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
      abort();
    }
  }
}
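
// Example invocation (a minimal sketch; the exact command line assumes the
// opt tool of this LLVM era): the pass is registered above under the name
// "scalarrepl", so it can be run on a bitcode file as:
//
//   opt -scalarrepl input.bc -o output.bc
//
// runOnFunction then interleaves scalar replacement with mem2reg-style
// promotion until neither makes further progress.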