ScalarReplAggregates.cpp revision f5990edc877c4e63503c589928a00ec6ec751830
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation. This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible). Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// they often interact, especially for C++ programs. As such, iterating between
// SRoA, then Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Pass.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

namespace {
  Statistic<> NumReplaced("scalarrepl", "Number of allocas broken up");
  Statistic<> NumPromoted("scalarrepl", "Number of allocas promoted");

  // SROA - The scalar-replacement-of-aggregates pass.  Alternates between
  // breaking aggregate allocas into per-element allocas (performScalarRepl)
  // and promoting allocas to SSA registers (performPromotion) until neither
  // makes progress.
  struct SROA : public FunctionPass {
    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass does not require any passes, but we know it
    // will not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    // Safety checks return 0 (unsafe), 1 (safe after canonicalization), or
    // 3 (safe); callers combine verdicts with bitwise AND.
    int isSafeElementUse(Value *Ptr);
    int isSafeUseOfAllocation(Instruction *User);
    int isSafeAllocaToScalarRepl(AllocationInst *AI);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    // NOTE(review): declared but not defined in this portion of the file.
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);
  };

  RegisterOpt<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
}

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass() { return new SROA(); }


// runOnFunction - Promote first, then iterate scalarrepl/promotion to a fixed
// point: each transformation can expose new opportunities for the other.
bool SROA::runOnFunction(Function &F) {
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


// performPromotion - Repeatedly collect promotable allocas from the entry
// block and run mem2reg on them, until no promotable allocas remain.
bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  const TargetData &TD = getAnalysis<TargetData>();
  DominatorTree &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node.  Note the loop stops one short of end(), skipping the
    // block's final (terminator) instruction.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI, TD))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF, TD);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}


// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function, removing
// them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any alloca's and mallocs to the worklist
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // We cannot transform the allocation instruction if it is an array
    // allocation (allocations OF arrays are ok though), and an allocation of a
    // scalar value cannot be decomposed at all.
    //
    if (AI->isArrayAllocation() ||
        (!isa<StructType>(AI->getAllocatedType()) &&
         !isa<ArrayType>(AI->getAllocatedType()))) continue;

    // Check that all of the users of the allocation are capable of being
    // transformed.
    switch (isSafeAllocaToScalarRepl(AI)) {
    default: assert(0 && "Unexpected value!");
    case 0:  // Not safe to scalar replace.
      continue;
    case 1:  // Safe, but requires cleanup/canonicalizations first
      CanonicalizeAllocaUsers(AI);
      // FALL THROUGH: after canonicalization the alloca is safe to replace.
    case 3:  // Safe to scalar replace.
      break;
    }

    DEBUG(std::cerr << "Found inst to xform: " << *AI);
    Changed = true;

    // Create one new alloca per struct field (or array element), inserting
    // each new alloca right before the aggregate alloca it replaces.
    std::vector<AllocaInst*> ElementAllocas;
    if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
      ElementAllocas.reserve(ST->getNumContainedTypes());
      for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
        AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                        AI->getName() + "." + utostr(i), AI);
        ElementAllocas.push_back(NA);
        WorkList.push_back(NA);  // Add to worklist for recursive processing
      }
    } else {
      const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
      ElementAllocas.reserve(AT->getNumElements());
      const Type *ElTy = AT->getElementType();
      for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
        AllocaInst *NA = new AllocaInst(ElTy, 0,
                                        AI->getName() + "." + utostr(i), AI);
        ElementAllocas.push_back(NA);
        WorkList.push_back(NA);  // Add to worklist for recursive processing
      }
    }

    // Now that we have created the alloca instructions that we want to use,
    // expand the getelementptr instructions to use them.
    //
    while (!AI->use_empty()) {
      Instruction *User = cast<Instruction>(AI->use_back());
      if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
        // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
        // (the safety checks rejected anything else).
        uint64_t Idx = cast<ConstantInt>(GEPI->getOperand(2))->getRawValue();

        assert(Idx < ElementAllocas.size() && "Index out of range?");
        AllocaInst *AllocaToUse = ElementAllocas[Idx];

        Value *RepValue;
        if (GEPI->getNumOperands() == 3) {
          // Do not insert a new getelementptr instruction with zero indices,
          // only to have it optimized out later.
          RepValue = AllocaToUse;
        } else {
          // We are indexing deeply into the structure, so we still need a
          // getelement ptr instruction to finish the indexing. This may be
          // expanded itself once the worklist is rerun.
          //
          std::string OldName = GEPI->getName();  // Steal the old name...
          std::vector<Value*> NewArgs;
          NewArgs.push_back(Constant::getNullValue(Type::IntTy));
          NewArgs.insert(NewArgs.end(), GEPI->op_begin()+3, GEPI->op_end());
          GEPI->setName("");
          RepValue =
            new GetElementPtrInst(AllocaToUse, NewArgs, OldName, GEPI);
        }

        // Move all of the users over to the new GEP.
        GEPI->replaceAllUsesWith(RepValue);
        // Delete the old GEP
        GEPI->getParent()->getInstList().erase(GEPI);
      } else {
        assert(0 && "Unexpected instruction type!");
      }
    }

    // Finally, delete the Alloca instruction
    AI->getParent()->getInstList().erase(AI);
    NumReplaced++;
  }

  return Changed;
}


/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.
/// Returns 0 if any (transitive) user is unsafe, 3 if all users look ok.
///
int SROA::isSafeElementUse(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load: break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return 0;
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      if (GEP->getNumOperands() > 1) {
        // The first index must be the constant zero; anything else is
        // pointer arithmetic over the element, which we cannot decompose.
        if (!isa<Constant>(GEP->getOperand(1)) ||
            !cast<Constant>(GEP->getOperand(1))->isNullValue())
          return 0; // Using pointer arithmetic to navigate the array...
      }
      // Recurse: the derived pointer's users must be safe too.
      if (!isSafeElementUse(GEP)) return 0;
      break;
    }
    default:
      DEBUG(std::cerr << " Transformation preventing inst: " << *User);
      return 0;
    }
  }
  return 3; // All users look ok :)
}

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
/// Returns 0 if the use is unsafe, otherwise the verdict of isSafeElementUse.
///
int SROA::isSafeUseOfAllocation(Instruction *User) {
  // Only getelementptr users can be decomposed; anything else is unsafe.
  if (!isa<GetElementPtrInst>(User)) return 0;

  GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is safe to transform if it is of the form GEP <ptr>, 0, <cst>
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType()))
    return 0;

  // The second index must be a constant integer (it selects the element).
  ++I;
  if (I == E || !isa<ConstantInt>(I.getOperand()))
    return 0;

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    uint64_t NumElements = AT->getNumElements();

    // Check to make sure that index falls within the array. If not,
    // something funny is going on, so we won't do the optimization.
    //
    if (cast<ConstantInt>(GEPI->getOperand(2))->getRawValue() >= NumElements)
      return 0;
  }

  // If there are any non-simple uses of this getelementptr, make sure to reject
  // them.
  return isSafeElementUse(GEPI);
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of
/// an aggregate can be broken down into elements. Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca. We can only transform it if all of
  // the users are safe to transform.
  //
  // Verdicts are combined with bitwise AND: any 0 makes the result 0, and
  // any 1 ("needs cleanup") downgrades a 3 ("safe") to 1.
  int isSafe = 3;
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafe &= isSafeUseOfAllocation(cast<Instruction>(*I));
    if (isSafe == 0) {
      DEBUG(std::cerr << "Cannot transform: " << *AI << " due to user: "
            << **I);
      return 0;
    }
  }
  // If we require cleanup, isSafe is now 1, otherwise it is 3.
  // NOTE(review): in this revision isSafeUseOfAllocation/isSafeElementUse only
  // ever return 0 or 3, so the value 1 is never actually produced here.
  return isSafe;
}

/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
/// NOTE(review): currently an empty stub — consistent with the fact that the
/// "needs cleanup" (1) verdict is never produced by the checks above yet.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {


}