ScalarReplAggregates.cpp revision 3822ff5c71478c7c90a50ca57045fb676fcb5005
1//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by the LLVM research group and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This transformation implements the well known scalar replacement of 11// aggregates transformation. This xform breaks up alloca instructions of 12// aggregate type (structure or array) into individual alloca instructions for 13// each member (if possible). Then, if possible, it transforms the individual 14// alloca instructions into nice clean scalar SSA form. 15// 16// This combines a simple SRoA algorithm with the Mem2Reg algorithm because 17// often interact, especially for C++ programs. As such, iterating between 18// SRoA, then Mem2Reg until we run out of things to promote works well. 19// 20//===----------------------------------------------------------------------===// 21 22#include "llvm/Transforms/Scalar.h" 23#include "llvm/Constants.h" 24#include "llvm/DerivedTypes.h" 25#include "llvm/Function.h" 26#include "llvm/Pass.h" 27#include "llvm/Instructions.h" 28#include "llvm/Analysis/Dominators.h" 29#include "llvm/Target/TargetData.h" 30#include "llvm/Transforms/Utils/PromoteMemToReg.h" 31#include "llvm/Support/Debug.h" 32#include "llvm/Support/GetElementPtrTypeIterator.h" 33#include "llvm/Support/MathExtras.h" 34#include "llvm/Support/Compiler.h" 35#include "llvm/ADT/Statistic.h" 36#include "llvm/ADT/StringExtras.h" 37#include <iostream> 38using namespace llvm; 39 40namespace { 41 Statistic<> NumReplaced("scalarrepl", "Number of allocas broken up"); 42 Statistic<> NumPromoted("scalarrepl", "Number of allocas promoted"); 43 Statistic<> NumConverted("scalarrepl", 44 "Number of aggregates converted to scalar"); 45 46 struct VISIBILITY_HIDDEN SROA : public FunctionPass { 47 bool runOnFunction(Function &F); 48 49 bool performScalarRepl(Function &F); 50 bool performPromotion(Function &F); 51 52 // getAnalysisUsage - This pass does not require any passes, but we know it 53 // will not alter the CFG, so say so. 54 virtual void getAnalysisUsage(AnalysisUsage &AU) const { 55 AU.addRequired<DominatorTree>(); 56 AU.addRequired<DominanceFrontier>(); 57 AU.addRequired<TargetData>(); 58 AU.setPreservesCFG(); 59 } 60 61 private: 62 int isSafeElementUse(Value *Ptr); 63 int isSafeUseOfAllocation(Instruction *User); 64 int isSafeAllocaToScalarRepl(AllocationInst *AI); 65 void CanonicalizeAllocaUsers(AllocationInst *AI); 66 AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base); 67 68 const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial); 69 void ConvertToScalar(AllocationInst *AI, const Type *Ty); 70 void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset); 71 }; 72 73 RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates"); 74} 75 76// Public interface to the ScalarReplAggregates pass 77FunctionPass *llvm::createScalarReplAggregatesPass() { return new SROA(); } 78 79 80bool SROA::runOnFunction(Function &F) { 81 bool Changed = performPromotion(F); 82 while (1) { 83 bool LocalChange = performScalarRepl(F); 84 if (!LocalChange) break; // No need to repromote if no scalarrepl 85 Changed = true; 86 LocalChange = performPromotion(F); 87 if (!LocalChange) break; // No need to re-scalarrepl if no promotion 88 } 89 90 return Changed; 91} 92 93 94bool SROA::performPromotion(Function &F) { 95 std::vector<AllocaInst*> Allocas; 96 const TargetData &TD = getAnalysis<TargetData>(); 97 DominatorTree &DT = getAnalysis<DominatorTree>(); 98 DominanceFrontier &DF = getAnalysis<DominanceFrontier>(); 99 100 BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function 101 102 bool Changed = false; 103 104 while (1) { 105 Allocas.clear(); 106 107 // Find allocas that are safe to promote, by looking at all instructions in 108 // the entry node 109 for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I) 110 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca? 111 if (isAllocaPromotable(AI, TD)) 112 Allocas.push_back(AI); 113 114 if (Allocas.empty()) break; 115 116 PromoteMemToReg(Allocas, DT, DF, TD); 117 NumPromoted += Allocas.size(); 118 Changed = true; 119 } 120 121 return Changed; 122} 123 124// performScalarRepl - This algorithm is a simple worklist driven algorithm, 125// which runs on all of the malloc/alloca instructions in the function, removing 126// them if they are only used by getelementptr instructions. 127// 128bool SROA::performScalarRepl(Function &F) { 129 std::vector<AllocationInst*> WorkList; 130 131 // Scan the entry basic block, adding any alloca's and mallocs to the worklist 132 BasicBlock &BB = F.getEntryBlock(); 133 for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) 134 if (AllocationInst *A = dyn_cast<AllocationInst>(I)) 135 WorkList.push_back(A); 136 137 // Process the worklist 138 bool Changed = false; 139 while (!WorkList.empty()) { 140 AllocationInst *AI = WorkList.back(); 141 WorkList.pop_back(); 142 143 // If we can turn this aggregate value (potentially with casts) into a 144 // simple scalar value that can be mem2reg'd into a register value. 145 bool IsNotTrivial = false; 146 if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial)) 147 if (IsNotTrivial && ActualType != Type::VoidTy) { 148 ConvertToScalar(AI, ActualType); 149 Changed = true; 150 continue; 151 } 152 153 // We cannot transform the allocation instruction if it is an array 154 // allocation (allocations OF arrays are ok though), and an allocation of a 155 // scalar value cannot be decomposed at all. 156 // 157 if (AI->isArrayAllocation() || 158 (!isa<StructType>(AI->getAllocatedType()) && 159 !isa<ArrayType>(AI->getAllocatedType()))) continue; 160 161 // Check that all of the users of the allocation are capable of being 162 // transformed. 163 switch (isSafeAllocaToScalarRepl(AI)) { 164 default: assert(0 && "Unexpected value!"); 165 case 0: // Not safe to scalar replace. 166 continue; 167 case 1: // Safe, but requires cleanup/canonicalizations first 168 CanonicalizeAllocaUsers(AI); 169 case 3: // Safe to scalar replace. 170 break; 171 } 172 173 DEBUG(std::cerr << "Found inst to xform: " << *AI); 174 Changed = true; 175 176 std::vector<AllocaInst*> ElementAllocas; 177 if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) { 178 ElementAllocas.reserve(ST->getNumContainedTypes()); 179 for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) { 180 AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0, 181 AI->getAlignment(), 182 AI->getName() + "." + utostr(i), AI); 183 ElementAllocas.push_back(NA); 184 WorkList.push_back(NA); // Add to worklist for recursive processing 185 } 186 } else { 187 const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType()); 188 ElementAllocas.reserve(AT->getNumElements()); 189 const Type *ElTy = AT->getElementType(); 190 for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { 191 AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(), 192 AI->getName() + "." + utostr(i), AI); 193 ElementAllocas.push_back(NA); 194 WorkList.push_back(NA); // Add to worklist for recursive processing 195 } 196 } 197 198 // Now that we have created the alloca instructions that we want to use, 199 // expand the getelementptr instructions to use them. 200 // 201 while (!AI->use_empty()) { 202 Instruction *User = cast<Instruction>(AI->use_back()); 203 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User); 204 // We now know that the GEP is of the form: GEP <ptr>, 0, <cst> 205 unsigned Idx = 206 (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue(); 207 208 assert(Idx < ElementAllocas.size() && "Index out of range?"); 209 AllocaInst *AllocaToUse = ElementAllocas[Idx]; 210 211 Value *RepValue; 212 if (GEPI->getNumOperands() == 3) { 213 // Do not insert a new getelementptr instruction with zero indices, only 214 // to have it optimized out later. 215 RepValue = AllocaToUse; 216 } else { 217 // We are indexing deeply into the structure, so we still need a 218 // getelement ptr instruction to finish the indexing. This may be 219 // expanded itself once the worklist is rerun. 220 // 221 std::string OldName = GEPI->getName(); // Steal the old name. 222 std::vector<Value*> NewArgs; 223 NewArgs.push_back(Constant::getNullValue(Type::IntTy)); 224 NewArgs.insert(NewArgs.end(), GEPI->op_begin()+3, GEPI->op_end()); 225 GEPI->setName(""); 226 RepValue = new GetElementPtrInst(AllocaToUse, NewArgs, OldName, GEPI); 227 } 228 229 // Move all of the users over to the new GEP. 230 GEPI->replaceAllUsesWith(RepValue); 231 // Delete the old GEP 232 GEPI->eraseFromParent(); 233 } 234 235 // Finally, delete the Alloca instruction 236 AI->getParent()->getInstList().erase(AI); 237 NumReplaced++; 238 } 239 240 return Changed; 241} 242 243 244/// isSafeElementUse - Check to see if this use is an allowed use for a 245/// getelementptr instruction of an array aggregate allocation. 246/// 247int SROA::isSafeElementUse(Value *Ptr) { 248 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 249 I != E; ++I) { 250 Instruction *User = cast<Instruction>(*I); 251 switch (User->getOpcode()) { 252 case Instruction::Load: break; 253 case Instruction::Store: 254 // Store is ok if storing INTO the pointer, not storing the pointer 255 if (User->getOperand(0) == Ptr) return 0; 256 break; 257 case Instruction::GetElementPtr: { 258 GetElementPtrInst *GEP = cast<GetElementPtrInst>(User); 259 if (GEP->getNumOperands() > 1) { 260 if (!isa<Constant>(GEP->getOperand(1)) || 261 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 262 return 0; // Using pointer arithmetic to navigate the array... 263 } 264 if (!isSafeElementUse(GEP)) return 0; 265 break; 266 } 267 default: 268 DEBUG(std::cerr << " Transformation preventing inst: " << *User); 269 return 0; 270 } 271 } 272 return 3; // All users look ok :) 273} 274 275/// AllUsersAreLoads - Return true if all users of this value are loads. 276static bool AllUsersAreLoads(Value *Ptr) { 277 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 278 I != E; ++I) 279 if (cast<Instruction>(*I)->getOpcode() != Instruction::Load) 280 return false; 281 return true; 282} 283 284/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an 285/// aggregate allocation. 286/// 287int SROA::isSafeUseOfAllocation(Instruction *User) { 288 if (!isa<GetElementPtrInst>(User)) return 0; 289 290 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User); 291 gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI); 292 293 // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>". 294 if (I == E || 295 I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) 296 return 0; 297 298 ++I; 299 if (I == E) return 0; // ran out of GEP indices?? 300 301 // If this is a use of an array allocation, do a bit more checking for sanity. 302 if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) { 303 uint64_t NumElements = AT->getNumElements(); 304 305 if (isa<ConstantInt>(I.getOperand())) { 306 // Check to make sure that index falls within the array. If not, 307 // something funny is going on, so we won't do the optimization. 308 // 309 if (cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue() >= NumElements) 310 return 0; 311 312 // We cannot scalar repl this level of the array unless any array 313 // sub-indices are in-range constants. In particular, consider: 314 // A[0][i]. We cannot know that the user isn't doing invalid things like 315 // allowing i to index an out-of-range subscript that accesses A[1]. 316 // 317 // Scalar replacing *just* the outer index of the array is probably not 318 // going to be a win anyway, so just give up. 319 for (++I; I != E && (isa<ArrayType>(*I) || isa<PackedType>(*I)); ++I) { 320 uint64_t NumElements; 321 if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*I)) 322 NumElements = SubArrayTy->getNumElements(); 323 else 324 NumElements = cast<PackedType>(*I)->getNumElements(); 325 326 if (!isa<ConstantInt>(I.getOperand())) return 0; 327 if (cast<ConstantInt>(I.getOperand())->getZExtValue() >= NumElements) 328 return 0; 329 } 330 331 } else { 332 // If this is an array index and the index is not constant, we cannot 333 // promote... that is unless the array has exactly one or two elements in 334 // it, in which case we CAN promote it, but we have to canonicalize this 335 // out if this is the only problem. 336 if ((NumElements == 1 || NumElements == 2) && 337 AllUsersAreLoads(GEPI)) 338 return 1; // Canonicalization required! 339 return 0; 340 } 341 } 342 343 // If there are any non-simple uses of this getelementptr, make sure to reject 344 // them. 345 return isSafeElementUse(GEPI); 346} 347 348/// isSafeStructAllocaToScalarRepl - Check to see if the specified allocation of 349/// an aggregate can be broken down into elements. Return 0 if not, 3 if safe, 350/// or 1 if safe after canonicalization has been performed. 351/// 352int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) { 353 // Loop over the use list of the alloca. We can only transform it if all of 354 // the users are safe to transform. 355 // 356 int isSafe = 3; 357 for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); 358 I != E; ++I) { 359 isSafe &= isSafeUseOfAllocation(cast<Instruction>(*I)); 360 if (isSafe == 0) { 361 DEBUG(std::cerr << "Cannot transform: " << *AI << " due to user: " 362 << **I); 363 return 0; 364 } 365 } 366 // If we require cleanup, isSafe is now 1, otherwise it is 3. 367 return isSafe; 368} 369 370/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified 371/// allocation, but only if cleaned up, perform the cleanups required. 372void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) { 373 // At this point, we know that the end result will be SROA'd and promoted, so 374 // we can insert ugly code if required so long as sroa+mem2reg will clean it 375 // up. 376 for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); 377 UI != E; ) { 378 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(*UI++); 379 gep_type_iterator I = gep_type_begin(GEPI); 380 ++I; 381 382 if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) { 383 uint64_t NumElements = AT->getNumElements(); 384 385 if (!isa<ConstantInt>(I.getOperand())) { 386 if (NumElements == 1) { 387 GEPI->setOperand(2, Constant::getNullValue(Type::IntTy)); 388 } else { 389 assert(NumElements == 2 && "Unhandled case!"); 390 // All users of the GEP must be loads. At each use of the GEP, insert 391 // two loads of the appropriate indexed GEP and select between them. 392 Value *IsOne = BinaryOperator::createSetNE(I.getOperand(), 393 Constant::getNullValue(I.getOperand()->getType()), 394 "isone", GEPI); 395 // Insert the new GEP instructions, which are properly indexed. 396 std::vector<Value*> Indices(GEPI->op_begin()+1, GEPI->op_end()); 397 Indices[1] = Constant::getNullValue(Type::IntTy); 398 Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0), Indices, 399 GEPI->getName()+".0", GEPI); 400 Indices[1] = ConstantInt::get(Type::IntTy, 1); 401 Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0), Indices, 402 GEPI->getName()+".1", GEPI); 403 // Replace all loads of the variable index GEP with loads from both 404 // indexes and a select. 405 while (!GEPI->use_empty()) { 406 LoadInst *LI = cast<LoadInst>(GEPI->use_back()); 407 Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI); 408 Value *One = new LoadInst(OneIdx , LI->getName()+".1", LI); 409 Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI); 410 LI->replaceAllUsesWith(R); 411 LI->eraseFromParent(); 412 } 413 GEPI->eraseFromParent(); 414 } 415 } 416 } 417 } 418} 419 420/// MergeInType - Add the 'In' type to the accumulated type so far. If the 421/// types are incompatible, return true, otherwise update Accum and return 422/// false. 423/// 424/// There are two cases we handle here: 425/// 1) An effectively integer union, where the pieces are stored into as 426/// smaller integers (common with byte swap and other idioms). 427/// 2) A union of a vector and its elements. Here we turn element accesses 428/// into insert/extract element operations. 429static bool MergeInType(const Type *In, const Type *&Accum, 430 const TargetData &TD) { 431 // If this is our first type, just use it. 432 const PackedType *PTy; 433 if (Accum == Type::VoidTy || In == Accum) { 434 Accum = In; 435 } else if (In->isIntegral() && Accum->isIntegral()) { // integer union. 436 // Otherwise pick whichever type is larger. 437 if (In->getTypeID() > Accum->getTypeID()) 438 Accum = In; 439 } else if (isa<PointerType>(In) && isa<PointerType>(Accum)) { 440 // Pointer unions just stay as one of the pointers. 441 } else if ((PTy = dyn_cast<PackedType>(Accum)) && 442 PTy->getElementType() == In) { 443 // Accum is a vector, and we are accessing an element: ok. 444 } else if ((PTy = dyn_cast<PackedType>(In)) && 445 PTy->getElementType() == Accum) { 446 // In is a vector, and accum is an element: ok, remember In. 447 Accum = In; 448 } else if (isa<PointerType>(In) && Accum->isIntegral()) { 449 // Pointer/Integer unions merge together as integers. 450 return MergeInType(TD.getIntPtrType(), Accum, TD); 451 } else if (isa<PointerType>(Accum) && In->isIntegral()) { 452 // Pointer/Integer unions merge together as integers. 453 Accum = TD.getIntPtrType(); 454 return MergeInType(In, Accum, TD); 455 } else { 456 return true; 457 } 458 return false; 459} 460 461/// getUIntAtLeastAsBitAs - Return an unsigned integer type that is at least 462/// as big as the specified type. If there is no suitable type, this returns 463/// null. 464const Type *getUIntAtLeastAsBitAs(unsigned NumBits) { 465 if (NumBits > 64) return 0; 466 if (NumBits > 32) return Type::ULongTy; 467 if (NumBits > 16) return Type::UIntTy; 468 if (NumBits > 8) return Type::UShortTy; 469 return Type::UByteTy; 470} 471 472/// CanConvertToScalar - V is a pointer. If we can convert the pointee to a 473/// single scalar integer type, return that type. Further, if the use is not 474/// a completely trivial use that mem2reg could promote, set IsNotTrivial. If 475/// there are no uses of this pointer, return Type::VoidTy to differentiate from 476/// failure. 477/// 478const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) { 479 const Type *UsedType = Type::VoidTy; // No uses, no forced type. 480 const TargetData &TD = getAnalysis<TargetData>(); 481 const PointerType *PTy = cast<PointerType>(V->getType()); 482 483 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) { 484 Instruction *User = cast<Instruction>(*UI); 485 486 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 487 if (MergeInType(LI->getType(), UsedType, TD)) 488 return 0; 489 490 } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 491 // Storing the pointer, not the into the value? 492 if (SI->getOperand(0) == V) return 0; 493 494 // NOTE: We could handle storing of FP imms into integers here! 495 496 if (MergeInType(SI->getOperand(0)->getType(), UsedType, TD)) 497 return 0; 498 } else if (CastInst *CI = dyn_cast<CastInst>(User)) { 499 if (!isa<PointerType>(CI->getType())) return 0; 500 IsNotTrivial = true; 501 const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial); 502 if (!SubTy || MergeInType(SubTy, UsedType, TD)) return 0; 503 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { 504 // Check to see if this is stepping over an element: GEP Ptr, int C 505 if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) { 506 unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue(); 507 unsigned ElSize = TD.getTypeSize(PTy->getElementType()); 508 unsigned BitOffset = Idx*ElSize*8; 509 if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0; 510 511 IsNotTrivial = true; 512 const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial); 513 if (SubElt == 0) return 0; 514 if (SubElt != Type::VoidTy && SubElt->isInteger()) { 515 const Type *NewTy = 516 getUIntAtLeastAsBitAs(TD.getTypeSize(SubElt)*8+BitOffset); 517 if (NewTy == 0 || MergeInType(NewTy, UsedType, TD)) return 0; 518 continue; 519 } 520 } else if (GEP->getNumOperands() == 3 && 521 isa<ConstantInt>(GEP->getOperand(1)) && 522 isa<ConstantInt>(GEP->getOperand(2)) && 523 cast<Constant>(GEP->getOperand(1))->isNullValue()) { 524 // We are stepping into an element, e.g. a structure or an array: 525 // GEP Ptr, int 0, uint C 526 const Type *AggTy = PTy->getElementType(); 527 unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue(); 528 529 if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) { 530 if (Idx >= ATy->getNumElements()) return 0; // Out of range. 531 } else if (const PackedType *PackedTy = dyn_cast<PackedType>(AggTy)) { 532 // Getting an element of the packed vector. 533 if (Idx >= PackedTy->getNumElements()) return 0; // Out of range. 534 535 // Merge in the packed type. 536 if (MergeInType(PackedTy, UsedType, TD)) return 0; 537 538 const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial); 539 if (SubTy == 0) return 0; 540 541 if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD)) 542 return 0; 543 544 // We'll need to change this to an insert/extract element operation. 545 IsNotTrivial = true; 546 continue; // Everything looks ok 547 548 } else if (isa<StructType>(AggTy)) { 549 // Structs are always ok. 550 } else { 551 return 0; 552 } 553 const Type *NTy = getUIntAtLeastAsBitAs(TD.getTypeSize(AggTy)*8); 554 if (NTy == 0 || MergeInType(NTy, UsedType, TD)) return 0; 555 const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial); 556 if (SubTy == 0) return 0; 557 if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD)) 558 return 0; 559 continue; // Everything looks ok 560 } 561 return 0; 562 } else { 563 // Cannot handle this! 564 return 0; 565 } 566 } 567 568 return UsedType; 569} 570 571/// ConvertToScalar - The specified alloca passes the CanConvertToScalar 572/// predicate and is non-trivial. Convert it to something that can be trivially 573/// promoted into a register by mem2reg. 574void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) { 575 DEBUG(std::cerr << "CONVERT TO SCALAR: " << *AI << " TYPE = " 576 << *ActualTy << "\n"); 577 ++NumConverted; 578 579 BasicBlock *EntryBlock = AI->getParent(); 580 assert(EntryBlock == &EntryBlock->getParent()->front() && 581 "Not in the entry block!"); 582 EntryBlock->getInstList().remove(AI); // Take the alloca out of the program. 583 584 if (ActualTy->isInteger()) 585 ActualTy = ActualTy->getUnsignedVersion(); 586 587 // Create and insert the alloca. 588 AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(), 589 EntryBlock->begin()); 590 ConvertUsesToScalar(AI, NewAI, 0); 591 delete AI; 592} 593 594 595/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca 596/// directly. This happens when we are converting an "integer union" to a 597/// single integer scalar, or when we are converting a "vector union" to a 598/// vector with insert/extractelement instructions. 599/// 600/// Offset is an offset from the original alloca, in bits that need to be 601/// shifted to the right. By the end of this, there should be no uses of Ptr. 602void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) { 603 bool isVectorInsert = isa<PackedType>(NewAI->getType()->getElementType()); 604 const TargetData &TD = getAnalysis<TargetData>(); 605 while (!Ptr->use_empty()) { 606 Instruction *User = cast<Instruction>(Ptr->use_back()); 607 608 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 609 // The load is a bit extract from NewAI shifted right by Offset bits. 610 Value *NV = new LoadInst(NewAI, LI->getName(), LI); 611 if (NV->getType() != LI->getType()) { 612 if (const PackedType *PTy = dyn_cast<PackedType>(NV->getType())) { 613 // Must be an element access. 614 unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8); 615 NV = new ExtractElementInst(NV, ConstantInt::get(Type::UIntTy, Elt), 616 "tmp", LI); 617 } else { 618 if (Offset) { 619 assert(NV->getType()->isInteger() && "Unknown promotion!"); 620 if (Offset < TD.getTypeSize(NV->getType())*8) { 621 NV = new ShiftInst(Instruction::LShr, NV, 622 ConstantInt::get(Type::UByteTy, Offset), 623 LI->getName(), LI); 624 } 625 } else { 626 assert((NV->getType()->isInteger() || 627 isa<PointerType>(NV->getType())) && "Unknown promotion!"); 628 } 629 NV = new CastInst(NV, LI->getType(), LI->getName(), LI); 630 } 631 } 632 LI->replaceAllUsesWith(NV); 633 LI->eraseFromParent(); 634 } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 635 assert(SI->getOperand(0) != Ptr && "Consistency error!"); 636 637 // Convert the stored type to the actual type, shift it left to insert 638 // then 'or' into place. 639 Value *SV = SI->getOperand(0); 640 const Type *AllocaType = NewAI->getType()->getElementType(); 641 if (SV->getType() != AllocaType) { 642 Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI); 643 644 if (const PackedType *PTy = dyn_cast<PackedType>(AllocaType)) { 645 // Must be an element insertion. 646 unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8); 647 SV = new InsertElementInst(Old, SV, 648 ConstantInt::get(Type::UIntTy, Elt), 649 "tmp", SI); 650 } else { 651 // If SV is signed, convert it to unsigned, so that the next cast zero 652 // extends the value. 653 if (SV->getType()->isSigned()) 654 SV = new CastInst(SV, SV->getType()->getUnsignedVersion(), 655 SV->getName(), SI); 656 SV = new CastInst(SV, Old->getType(), SV->getName(), SI); 657 if (Offset && Offset < TD.getTypeSize(SV->getType())*8) 658 SV = new ShiftInst(Instruction::Shl, SV, 659 ConstantInt::get(Type::UByteTy, Offset), 660 SV->getName()+".adj", SI); 661 // Mask out the bits we are about to insert from the old value. 662 unsigned TotalBits = TD.getTypeSize(SV->getType())*8; 663 unsigned InsertBits = TD.getTypeSize(SI->getOperand(0)->getType())*8; 664 if (TotalBits != InsertBits) { 665 assert(TotalBits > InsertBits); 666 uint64_t Mask = ~(((1ULL << InsertBits)-1) << Offset); 667 if (TotalBits != 64) 668 Mask = Mask & ((1ULL << TotalBits)-1); 669 Old = BinaryOperator::createAnd(Old, 670 ConstantInt::get(Old->getType(), Mask), 671 Old->getName()+".mask", SI); 672 SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI); 673 } 674 } 675 } 676 new StoreInst(SV, NewAI, SI); 677 SI->eraseFromParent(); 678 679 } else if (CastInst *CI = dyn_cast<CastInst>(User)) { 680 unsigned NewOff = Offset; 681 const TargetData &TD = getAnalysis<TargetData>(); 682 if (TD.isBigEndian() && !isVectorInsert) { 683 // Adjust the pointer. For example, storing 16-bits into a 32-bit 684 // alloca with just a cast makes it modify the top 16-bits. 685 const Type *SrcTy = cast<PointerType>(Ptr->getType())->getElementType(); 686 const Type *DstTy = cast<PointerType>(CI->getType())->getElementType(); 687 int PtrDiffBits = TD.getTypeSize(SrcTy)*8-TD.getTypeSize(DstTy)*8; 688 NewOff += PtrDiffBits; 689 } 690 ConvertUsesToScalar(CI, NewAI, NewOff); 691 CI->eraseFromParent(); 692 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { 693 const PointerType *AggPtrTy = 694 cast<PointerType>(GEP->getOperand(0)->getType()); 695 const TargetData &TD = getAnalysis<TargetData>(); 696 unsigned AggSizeInBits = TD.getTypeSize(AggPtrTy->getElementType())*8; 697 698 // Check to see if this is stepping over an element: GEP Ptr, int C 699 unsigned NewOffset = Offset; 700 if (GEP->getNumOperands() == 2) { 701 unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue(); 702 unsigned BitOffset = Idx*AggSizeInBits; 703 704 if (TD.isLittleEndian() || isVectorInsert) 705 NewOffset += BitOffset; 706 else 707 NewOffset -= BitOffset; 708 709 } else if (GEP->getNumOperands() == 3) { 710 // We know that operand #2 is zero. 711 unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue(); 712 const Type *AggTy = AggPtrTy->getElementType(); 713 if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) { 714 unsigned ElSizeBits = TD.getTypeSize(SeqTy->getElementType())*8; 715 716 if (TD.isLittleEndian() || isVectorInsert) 717 NewOffset += ElSizeBits*Idx; 718 else 719 NewOffset += AggSizeInBits-ElSizeBits*(Idx+1); 720 } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) { 721 unsigned EltBitOffset = TD.getStructLayout(STy)->MemberOffsets[Idx]*8; 722 723 if (TD.isLittleEndian() || isVectorInsert) 724 NewOffset += EltBitOffset; 725 else { 726 const PointerType *ElPtrTy = cast<PointerType>(GEP->getType()); 727 unsigned ElSizeBits = TD.getTypeSize(ElPtrTy->getElementType())*8; 728 NewOffset += AggSizeInBits-(EltBitOffset+ElSizeBits); 729 } 730 731 } else { 732 assert(0 && "Unsupported operation!"); 733 abort(); 734 } 735 } else { 736 assert(0 && "Unsupported operation!"); 737 abort(); 738 } 739 ConvertUsesToScalar(GEP, NewAI, NewOffset); 740 GEP->eraseFromParent(); 741 } else { 742 assert(0 && "Unsupported operation!"); 743 abort(); 744 } 745 } 746} 747