ArgumentPromotion.cpp revision dce4a407a24b04eebc6a376f8e62b41aaa7b071f
1//===-- ArgumentPromotion.cpp - Promote by-reference arguments ------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This pass promotes "by reference" arguments to be "by value" arguments. In 11// practice, this means looking for internal functions that have pointer 12// arguments. If it can prove, through the use of alias analysis, that an 13// argument is *only* loaded, then it can pass the value into the function 14// instead of the address of the value. This can cause recursive simplification 15// of code and lead to the elimination of allocas (especially in C++ template 16// code like the STL). 17// 18// This pass also handles aggregate arguments that are passed into a function, 19// scalarizing them if the elements of the aggregate are only loaded. Note that 20// by default it refuses to scalarize aggregates which would require passing in 21// more than three operands to the function, because passing thousands of 22// operands for a large array or structure is unprofitable! This limit can be 23// configured or disabled, however. 24// 25// Note that this transformation could also be done for arguments that are only 26// stored to (returning the value instead), but does not currently. This case 27// would be best handled when and if LLVM begins supporting multiple return 28// values from functions. 
29// 30//===----------------------------------------------------------------------===// 31 32#include "llvm/Transforms/IPO.h" 33#include "llvm/ADT/DepthFirstIterator.h" 34#include "llvm/ADT/Statistic.h" 35#include "llvm/ADT/StringExtras.h" 36#include "llvm/Analysis/AliasAnalysis.h" 37#include "llvm/Analysis/CallGraph.h" 38#include "llvm/Analysis/CallGraphSCCPass.h" 39#include "llvm/IR/CFG.h" 40#include "llvm/IR/CallSite.h" 41#include "llvm/IR/Constants.h" 42#include "llvm/IR/DerivedTypes.h" 43#include "llvm/IR/Instructions.h" 44#include "llvm/IR/LLVMContext.h" 45#include "llvm/IR/Module.h" 46#include "llvm/Support/Debug.h" 47#include "llvm/Support/raw_ostream.h" 48#include <set> 49using namespace llvm; 50 51#define DEBUG_TYPE "argpromotion" 52 53STATISTIC(NumArgumentsPromoted , "Number of pointer arguments promoted"); 54STATISTIC(NumAggregatesPromoted, "Number of aggregate arguments promoted"); 55STATISTIC(NumByValArgsPromoted , "Number of byval arguments promoted"); 56STATISTIC(NumArgumentsDead , "Number of dead pointer args eliminated"); 57 58namespace { 59 /// ArgPromotion - The 'by reference' to 'by value' argument promotion pass. 
60 /// 61 struct ArgPromotion : public CallGraphSCCPass { 62 void getAnalysisUsage(AnalysisUsage &AU) const override { 63 AU.addRequired<AliasAnalysis>(); 64 CallGraphSCCPass::getAnalysisUsage(AU); 65 } 66 67 bool runOnSCC(CallGraphSCC &SCC) override; 68 static char ID; // Pass identification, replacement for typeid 69 explicit ArgPromotion(unsigned maxElements = 3) 70 : CallGraphSCCPass(ID), maxElements(maxElements) { 71 initializeArgPromotionPass(*PassRegistry::getPassRegistry()); 72 } 73 74 /// A vector used to hold the indices of a single GEP instruction 75 typedef std::vector<uint64_t> IndicesVector; 76 77 private: 78 CallGraphNode *PromoteArguments(CallGraphNode *CGN); 79 bool isSafeToPromoteArgument(Argument *Arg, bool isByVal) const; 80 CallGraphNode *DoPromotion(Function *F, 81 SmallPtrSet<Argument*, 8> &ArgsToPromote, 82 SmallPtrSet<Argument*, 8> &ByValArgsToTransform); 83 /// The maximum number of elements to expand, or 0 for unlimited. 84 unsigned maxElements; 85 }; 86} 87 88char ArgPromotion::ID = 0; 89INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion", 90 "Promote 'by reference' arguments to scalars", false, false) 91INITIALIZE_AG_DEPENDENCY(AliasAnalysis) 92INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass) 93INITIALIZE_PASS_END(ArgPromotion, "argpromotion", 94 "Promote 'by reference' arguments to scalars", false, false) 95 96Pass *llvm::createArgumentPromotionPass(unsigned maxElements) { 97 return new ArgPromotion(maxElements); 98} 99 100bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) { 101 bool Changed = false, LocalChange; 102 103 do { // Iterate until we stop promoting from this SCC. 104 LocalChange = false; 105 // Attempt to promote arguments from all functions in this SCC. 106 for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { 107 if (CallGraphNode *CGN = PromoteArguments(*I)) { 108 LocalChange = true; 109 SCC.ReplaceNode(*I, CGN); 110 } 111 } 112 Changed |= LocalChange; // Remember that we changed something. 
113 } while (LocalChange); 114 115 return Changed; 116} 117 118/// PromoteArguments - This method checks the specified function to see if there 119/// are any promotable arguments and if it is safe to promote the function (for 120/// example, all callers are direct). If safe to promote some arguments, it 121/// calls the DoPromotion method. 122/// 123CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) { 124 Function *F = CGN->getFunction(); 125 126 // Make sure that it is local to this module. 127 if (!F || !F->hasLocalLinkage()) return nullptr; 128 129 // First check: see if there are any pointer arguments! If not, quick exit. 130 SmallVector<Argument*, 16> PointerArgs; 131 for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I) 132 if (I->getType()->isPointerTy()) 133 PointerArgs.push_back(I); 134 if (PointerArgs.empty()) return nullptr; 135 136 // Second check: make sure that all callers are direct callers. We can't 137 // transform functions that have indirect callers. Also see if the function 138 // is self-recursive. 139 bool isSelfRecursive = false; 140 for (Use &U : F->uses()) { 141 CallSite CS(U.getUser()); 142 // Must be a direct call. 143 if (CS.getInstruction() == nullptr || !CS.isCallee(&U)) return nullptr; 144 145 if (CS.getInstruction()->getParent()->getParent() == F) 146 isSelfRecursive = true; 147 } 148 149 // Check to see which arguments are promotable. If an argument is promotable, 150 // add it to ArgsToPromote. 151 SmallPtrSet<Argument*, 8> ArgsToPromote; 152 SmallPtrSet<Argument*, 8> ByValArgsToTransform; 153 for (unsigned i = 0, e = PointerArgs.size(); i != e; ++i) { 154 Argument *PtrArg = PointerArgs[i]; 155 Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType(); 156 157 // If this is a byval argument, and if the aggregate type is small, just 158 // pass the elements, which is always safe. This does not apply to 159 // inalloca. 
160 if (PtrArg->hasByValAttr()) { 161 if (StructType *STy = dyn_cast<StructType>(AgTy)) { 162 if (maxElements > 0 && STy->getNumElements() > maxElements) { 163 DEBUG(dbgs() << "argpromotion disable promoting argument '" 164 << PtrArg->getName() << "' because it would require adding more" 165 << " than " << maxElements << " arguments to the function.\n"); 166 continue; 167 } 168 169 // If all the elements are single-value types, we can promote it. 170 bool AllSimple = true; 171 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 172 if (!STy->getElementType(i)->isSingleValueType()) { 173 AllSimple = false; 174 break; 175 } 176 } 177 178 // Safe to transform, don't even bother trying to "promote" it. 179 // Passing the elements as a scalar will allow scalarrepl to hack on 180 // the new alloca we introduce. 181 if (AllSimple) { 182 ByValArgsToTransform.insert(PtrArg); 183 continue; 184 } 185 } 186 } 187 188 // If the argument is a recursive type and we're in a recursive 189 // function, we could end up infinitely peeling the function argument. 190 if (isSelfRecursive) { 191 if (StructType *STy = dyn_cast<StructType>(AgTy)) { 192 bool RecursiveType = false; 193 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 194 if (STy->getElementType(i) == PtrArg->getType()) { 195 RecursiveType = true; 196 break; 197 } 198 } 199 if (RecursiveType) 200 continue; 201 } 202 } 203 204 // Otherwise, see if we can promote the pointer to its value. 205 if (isSafeToPromoteArgument(PtrArg, PtrArg->hasByValOrInAllocaAttr())) 206 ArgsToPromote.insert(PtrArg); 207 } 208 209 // No promotable pointer arguments. 210 if (ArgsToPromote.empty() && ByValArgsToTransform.empty()) 211 return nullptr; 212 213 return DoPromotion(F, ArgsToPromote, ByValArgsToTransform); 214} 215 216/// AllCallersPassInValidPointerForArgument - Return true if we can prove that 217/// all callees pass in a valid pointer for the specified function argument. 
218static bool AllCallersPassInValidPointerForArgument(Argument *Arg) { 219 Function *Callee = Arg->getParent(); 220 221 unsigned ArgNo = Arg->getArgNo(); 222 223 // Look at all call sites of the function. At this pointer we know we only 224 // have direct callees. 225 for (User *U : Callee->users()) { 226 CallSite CS(U); 227 assert(CS && "Should only have direct calls!"); 228 229 if (!CS.getArgument(ArgNo)->isDereferenceablePointer()) 230 return false; 231 } 232 return true; 233} 234 235/// Returns true if Prefix is a prefix of longer. That means, Longer has a size 236/// that is greater than or equal to the size of prefix, and each of the 237/// elements in Prefix is the same as the corresponding elements in Longer. 238/// 239/// This means it also returns true when Prefix and Longer are equal! 240static bool IsPrefix(const ArgPromotion::IndicesVector &Prefix, 241 const ArgPromotion::IndicesVector &Longer) { 242 if (Prefix.size() > Longer.size()) 243 return false; 244 return std::equal(Prefix.begin(), Prefix.end(), Longer.begin()); 245} 246 247 248/// Checks if Indices, or a prefix of Indices, is in Set. 249static bool PrefixIn(const ArgPromotion::IndicesVector &Indices, 250 std::set<ArgPromotion::IndicesVector> &Set) { 251 std::set<ArgPromotion::IndicesVector>::iterator Low; 252 Low = Set.upper_bound(Indices); 253 if (Low != Set.begin()) 254 Low--; 255 // Low is now the last element smaller than or equal to Indices. This means 256 // it points to a prefix of Indices (possibly Indices itself), if such 257 // prefix exists. 258 // 259 // This load is safe if any prefix of its operands is safe to load. 260 return Low != Set.end() && IsPrefix(*Low, Indices); 261} 262 263/// Mark the given indices (ToMark) as safe in the given set of indices 264/// (Safe). Marking safe usually means adding ToMark to Safe. However, if there 265/// is already a prefix of Indices in Safe, Indices are implicitely marked safe 266/// already. 
Furthermore, any indices that Indices is itself a prefix of, are 267/// removed from Safe (since they are implicitely safe because of Indices now). 268static void MarkIndicesSafe(const ArgPromotion::IndicesVector &ToMark, 269 std::set<ArgPromotion::IndicesVector> &Safe) { 270 std::set<ArgPromotion::IndicesVector>::iterator Low; 271 Low = Safe.upper_bound(ToMark); 272 // Guard against the case where Safe is empty 273 if (Low != Safe.begin()) 274 Low--; 275 // Low is now the last element smaller than or equal to Indices. This 276 // means it points to a prefix of Indices (possibly Indices itself), if 277 // such prefix exists. 278 if (Low != Safe.end()) { 279 if (IsPrefix(*Low, ToMark)) 280 // If there is already a prefix of these indices (or exactly these 281 // indices) marked a safe, don't bother adding these indices 282 return; 283 284 // Increment Low, so we can use it as a "insert before" hint 285 ++Low; 286 } 287 // Insert 288 Low = Safe.insert(Low, ToMark); 289 ++Low; 290 // If there we're a prefix of longer index list(s), remove those 291 std::set<ArgPromotion::IndicesVector>::iterator End = Safe.end(); 292 while (Low != End && IsPrefix(ToMark, *Low)) { 293 std::set<ArgPromotion::IndicesVector>::iterator Remove = Low; 294 ++Low; 295 Safe.erase(Remove); 296 } 297} 298 299/// isSafeToPromoteArgument - As you might guess from the name of this method, 300/// it checks to see if it is both safe and useful to promote the argument. 301/// This method limits promotion of aggregates to only promote up to three 302/// elements of the aggregate in order to avoid exploding the number of 303/// arguments passed in. 
bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg,
                                           bool isByValOrInAlloca) const {
  // Each IndicesVector is the constant index list of one GEP; keeping them in
  // an ordered set is what makes the prefix queries in PrefixIn /
  // MarkIndicesSafe work.
  typedef std::set<IndicesVector> GEPIndicesSet;

  // Quick exit for unused arguments
  if (Arg->use_empty())
    return true;

  // We can only promote this argument if all of the uses are loads, or are GEP
  // instructions (with constant indices) that are subsequently loaded.
  //
  // Promoting the argument causes it to be loaded in the caller
  // unconditionally. This is only safe if we can prove that either the load
  // would have happened in the callee anyway (ie, there is a load in the entry
  // block) or the pointer passed in at every call site is guaranteed to be
  // valid.
  // In the former case, invalid loads can happen, but would have happened
  // anyway, in the latter case, invalid loads won't happen. This prevents us
  // from introducing an invalid load that wouldn't have happened in the
  // original code.
  //
  // This set will contain all sets of indices that are loaded in the entry
  // block, and thus are safe to unconditionally load in the caller.
  //
  // This optimization is also safe for InAlloca parameters, because it verifies
  // that the address isn't captured.
  GEPIndicesSet SafeToUnconditionallyLoad;

  // This set contains all the sets of indices that we are planning to promote.
  // This makes it possible to limit the number of arguments added.
  GEPIndicesSet ToPromote;

  // If the pointer is always valid, any load with first index 0 is valid.
  if (isByValOrInAlloca || AllCallersPassInValidPointerForArgument(Arg))
    SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));

  // First, iterate the entry block and mark loads of (geps of) arguments as
  // safe. Loads in the entry block execute unconditionally whenever the
  // function is entered, so hoisting them to the caller introduces no new
  // traps.
  BasicBlock *EntryBlock = Arg->getParent()->begin();
  // Declare this here so we can reuse it
  IndicesVector Indices;
  for (BasicBlock::iterator I = EntryBlock->begin(), E = EntryBlock->end();
       I != E; ++I)
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      Value *V = LI->getPointerOperand();
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
        V = GEP->getPointerOperand();
        if (V == Arg) {
          // This load actually loads (part of) Arg? Check the indices then.
          Indices.reserve(GEP->getNumIndices());
          for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
               II != IE; ++II)
            if (ConstantInt *CI = dyn_cast<ConstantInt>(*II))
              Indices.push_back(CI->getSExtValue());
            else
              // We found a non-constant GEP index for this argument? Bail out
              // right away, can't promote this argument at all.
              return false;

          // Indices checked out, mark them as safe
          MarkIndicesSafe(Indices, SafeToUnconditionallyLoad);
          Indices.clear();
        }
      } else if (V == Arg) {
        // Direct loads are equivalent to a GEP with a single 0 index.
        MarkIndicesSafe(IndicesVector(1, 0), SafeToUnconditionallyLoad);
      }
    }

  // Now, iterate all uses of the argument to see if there are any uses that are
  // not (GEP+)loads, or any (GEP+)loads that are not safe to promote.
  SmallVector<LoadInst*, 16> Loads;
  IndicesVector Operands;
  for (Use &U : Arg->uses()) {
    User *UR = U.getUser();
    Operands.clear();
    if (LoadInst *LI = dyn_cast<LoadInst>(UR)) {
      // Don't hack volatile/atomic loads
      if (!LI->isSimple()) return false;
      Loads.push_back(LI);
      // Direct loads are equivalent to a GEP with a zero index and then a load.
      Operands.push_back(0);
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UR)) {
      if (GEP->use_empty()) {
        // Dead GEP's cause trouble later. Just remove them if we run into
        // them.
        getAnalysis<AliasAnalysis>().deleteValue(GEP);
        GEP->eraseFromParent();
        // Erasing the GEP invalidated the use-list walk above, so restart the
        // whole analysis from scratch via self-recursion.
        // TODO: This runs the above loop over and over again for dead GEPs
        // Couldn't we just do increment the UI iterator earlier and erase the
        // use?
        return isSafeToPromoteArgument(Arg, isByValOrInAlloca);
      }

      // Ensure that all of the indices are constants.
      for (User::op_iterator i = GEP->idx_begin(), e = GEP->idx_end();
           i != e; ++i)
        if (ConstantInt *C = dyn_cast<ConstantInt>(*i))
          Operands.push_back(C->getSExtValue());
        else
          return false;  // Not a constant operand GEP!

      // Ensure that the only users of the GEP are load instructions.
      for (User *GEPU : GEP->users())
        if (LoadInst *LI = dyn_cast<LoadInst>(GEPU)) {
          // Don't hack volatile/atomic loads
          if (!LI->isSimple()) return false;
          Loads.push_back(LI);
        } else {
          // Other uses than load?
          return false;
        }
    } else {
      return false;  // Not a load or a GEP.
    }

    // Now, see if it is safe to promote this load / loads of this GEP. Loading
    // is safe if Operands, or a prefix of Operands, is marked as safe.
    if (!PrefixIn(Operands, SafeToUnconditionallyLoad))
      return false;

    // See if we are already promoting a load with these indices. If not, check
    // to make sure that we aren't promoting too many elements. If so, nothing
    // to do.
    if (ToPromote.find(Operands) == ToPromote.end()) {
      if (maxElements > 0 && ToPromote.size() == maxElements) {
        DEBUG(dbgs() << "argpromotion not promoting argument '"
              << Arg->getName() << "' because it would require adding more "
              << "than " << maxElements << " arguments to the function.\n");
        // We limit aggregate promotion to only promoting up to a fixed number
        // of elements of the aggregate.
        return false;
      }
      ToPromote.insert(Operands);
    }
  }

  if (Loads.empty()) return true;  // No users, this is a dead argument.

  // Okay, now we know that the argument is only used by load instructions and
  // it is safe to unconditionally perform all of them. Use alias analysis to
  // check to see if the pointer is guaranteed to not be modified from entry of
  // the function to each of the load instructions.

  // Because there could be several/many load instructions, remember which
  // blocks we know to be transparent to the load.
  SmallPtrSet<BasicBlock*, 16> TranspBlocks;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  for (unsigned i = 0, e = Loads.size(); i != e; ++i) {
    // Check to see if the load is invalidated from the start of the block to
    // the load itself.
    LoadInst *Load = Loads[i];
    BasicBlock *BB = Load->getParent();

    AliasAnalysis::Location Loc = AA.getLocation(Load);
    if (AA.canInstructionRangeModify(BB->front(), *Load, Loc))
      return false;  // Pointer is invalidated!

    // Now check every path from the entry block to the load for transparency.
    // To do this, we perform a depth first search on the inverse CFG from the
    // loading block. Blocks already proven transparent are accumulated in
    // TranspBlocks, so they are visited at most once across all loads.
    for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
      BasicBlock *P = *PI;
      for (idf_ext_iterator<BasicBlock*, SmallPtrSet<BasicBlock*, 16> >
             I = idf_ext_begin(P, TranspBlocks),
             E = idf_ext_end(P, TranspBlocks); I != E; ++I)
        if (AA.canBasicBlockModify(**I, Loc))
          return false;
    }
  }

  // If the path from the entry of the function to each load is free of
  // instructions that potentially invalidate the load, we can make the
  // transformation!
  return true;
}

/// DoPromotion - This method actually performs the promotion of the specified
/// arguments, and returns the new function. At this point, we know that it's
/// safe to do so.
CallGraphNode *ArgPromotion::DoPromotion(Function *F,
                               SmallPtrSet<Argument*, 8> &ArgsToPromote,
                              SmallPtrSet<Argument*, 8> &ByValArgsToTransform) {

  // Start by computing a new prototype for the function, which is the same as
  // the old function, but has modified arguments.
  FunctionType *FTy = F->getFunctionType();
  std::vector<Type*> Params;

  typedef std::set<IndicesVector> ScalarizeTable;

  // ScalarizedElements - If we are promoting a pointer that has elements
  // accessed out of it, keep track of which elements are accessed so that we
  // can add one argument for each.
  //
  // Arguments that are directly loaded will have a zero element value here, to
  // handle cases where there are both a direct load and GEP accesses.
  //
  std::map<Argument*, ScalarizeTable> ScalarizedElements;

  // OriginalLoads - Keep track of a representative load instruction from the
  // original function so that we can tell the alias analysis implementation
  // what the new GEP/Load instructions we are inserting look like.
  // We need to keep the original loads for each argument and the elements
  // of the argument that are accessed.
  std::map<std::pair<Argument*, IndicesVector>, LoadInst*> OriginalLoads;

  // Attribute - Keep track of the parameter attributes for the arguments
  // that we are *not* promoting. For the ones that we do promote, the parameter
  // attributes are lost
  SmallVector<AttributeSet, 8> AttributesVec;
  const AttributeSet &PAL = F->getAttributes();

  // Add any return attributes.
  if (PAL.hasAttributes(AttributeSet::ReturnIndex))
    AttributesVec.push_back(AttributeSet::get(F->getContext(),
                                              PAL.getRetAttributes()));

  // First, determine the new argument list. Note that attribute indices are
  // 1-based for parameters (index 0 is the return value), hence ArgIndex
  // starts at 1.
  unsigned ArgIndex = 1;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
       ++I, ++ArgIndex) {
    if (ByValArgsToTransform.count(I)) {
      // Simple byval argument? Just add all the struct element types.
      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      StructType *STy = cast<StructType>(AgTy);
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        Params.push_back(STy->getElementType(i));
      ++NumByValArgsPromoted;
    } else if (!ArgsToPromote.count(I)) {
      // Unchanged argument. Its attributes are looked up at the old index
      // (ArgIndex) but re-recorded at the argument's new position
      // (Params.size()), since earlier promotions may have shifted it.
      Params.push_back(I->getType());
      AttributeSet attrs = PAL.getParamAttributes(ArgIndex);
      if (attrs.hasAttributes(ArgIndex)) {
        AttrBuilder B(attrs, ArgIndex);
        AttributesVec.
          push_back(AttributeSet::get(F->getContext(), Params.size(), B));
      }
    } else if (I->use_empty()) {
      // Dead argument (which are always marked as promotable)
      ++NumArgumentsDead;
    } else {
      // Okay, this is being promoted. This means that the only uses are loads
      // or GEPs which are only used by loads

      // In this table, we will track which indices are loaded from the argument
      // (where direct loads are tracked as no indices).
      ScalarizeTable &ArgIndices = ScalarizedElements[I];
      for (User *U : I->users()) {
        Instruction *UI = cast<Instruction>(U);
        assert(isa<LoadInst>(UI) || isa<GetElementPtrInst>(UI));
        IndicesVector Indices;
        Indices.reserve(UI->getNumOperands() - 1);
        // Since loads will only have a single operand, and GEPs only a single
        // non-index operand, this will record direct loads without any indices,
        // and gep+loads with the GEP indices.
        for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
             II != IE; ++II)
          Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
        // GEPs with a single 0 index can be merged with direct loads
        if (Indices.size() == 1 && Indices.front() == 0)
          Indices.clear();
        ArgIndices.insert(Indices);
        LoadInst *OrigLoad;
        if (LoadInst *L = dyn_cast<LoadInst>(UI))
          OrigLoad = L;
        else
          // Take any load, we will use it only to update Alias Analysis
          OrigLoad = cast<LoadInst>(UI->user_back());
        OriginalLoads[std::make_pair(I, Indices)] = OrigLoad;
      }

      // Add a parameter to the function for each element passed in.
      for (ScalarizeTable::iterator SI = ArgIndices.begin(),
             E = ArgIndices.end(); SI != E; ++SI) {
        // not allowed to dereference ->begin() if size() is 0
        Params.push_back(GetElementPtrInst::getIndexedType(I->getType(), *SI));
        assert(Params.back());
      }

      // A single empty index list means only the pointer itself was loaded;
      // anything else means we scalarized aggregate element accesses.
      if (ArgIndices.size() == 1 && ArgIndices.begin()->empty())
        ++NumArgumentsPromoted;
      else
        ++NumAggregatesPromoted;
    }
  }

  // Add any function attributes.
  if (PAL.hasAttributes(AttributeSet::FunctionIndex))
    AttributesVec.push_back(AttributeSet::get(FTy->getContext(),
                                              PAL.getFnAttributes()));

  Type *RetTy = FTy->getReturnType();

  // Construct the new function type using the new arguments.
  FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());

  // Create the new function body and insert it into the module.
  Function *NF = Function::Create(NFTy, F->getLinkage(), F->getName());
  NF->copyAttributesFrom(F);


  DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n"
        << "From: " << *F);

  // Recompute the parameter attributes list based on the new arguments for
  // the function.
  NF->setAttributes(AttributeSet::get(F->getContext(), AttributesVec));
  AttributesVec.clear();

  // Insert NF right before F in the module and steal F's name.
  F->getParent()->getFunctionList().insert(F, NF);
  NF->takeName(F);

  // Get the alias analysis information that we need to update to reflect our
  // changes.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Get the callgraph information that we need to update to reflect our
  // changes.
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  // Get a new callgraph node for NF.
  CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);

  // Loop over all of the callers of the function, transforming the call sites
  // to pass in the loaded pointers. Each iteration erases one old call, which
  // shrinks F's use list, so this loop terminates.
  //
  SmallVector<Value*, 16> Args;
  while (!F->use_empty()) {
    CallSite CS(F->user_back());
    assert(CS.getCalledFunction() == F);
    Instruction *Call = CS.getInstruction();
    const AttributeSet &CallPAL = CS.getAttributes();

    // Add any return attributes.
    if (CallPAL.hasAttributes(AttributeSet::ReturnIndex))
      AttributesVec.push_back(AttributeSet::get(F->getContext(),
                                                CallPAL.getRetAttributes()));

    // Loop over the operands, inserting GEP and loads in the caller as
    // appropriate.
    CallSite::arg_iterator AI = CS.arg_begin();
    ArgIndex = 1;
    for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
         I != E; ++I, ++AI, ++ArgIndex)
      if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) {
        Args.push_back(*AI);          // Unmodified argument

        if (CallPAL.hasAttributes(ArgIndex)) {
          AttrBuilder B(CallPAL, ArgIndex);
          AttributesVec.
            push_back(AttributeSet::get(F->getContext(), Args.size(), B));
        }
      } else if (ByValArgsToTransform.count(I)) {
        // Emit a GEP and load for each element of the struct.
        Type *AgTy = cast<PointerType>(I->getType())->getElementType();
        StructType *STy = cast<StructType>(AgTy);
        Value *Idxs[2] = {
              ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr };
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
          Value *Idx = GetElementPtrInst::Create(*AI, Idxs,
                                                 (*AI)->getName()+"."+utostr(i),
                                                 Call);
          // TODO: Tell AA about the new values?
          Args.push_back(new LoadInst(Idx, Idx->getName()+".val", Call));
        }
      } else if (!I->use_empty()) {
        // Non-dead argument: insert GEPs and loads as appropriate.
        ScalarizeTable &ArgIndices = ScalarizedElements[I];
        // Store the Value* version of the indices in here, but declare it now
        // for reuse.
        std::vector<Value*> Ops;
        for (ScalarizeTable::iterator SI = ArgIndices.begin(),
               E = ArgIndices.end(); SI != E; ++SI) {
          Value *V = *AI;
          LoadInst *OrigLoad = OriginalLoads[std::make_pair(I, *SI)];
          if (!SI->empty()) {
            Ops.reserve(SI->size());
            Type *ElTy = V->getType();
            for (IndicesVector::const_iterator II = SI->begin(),
                 IE = SI->end(); II != IE; ++II) {
              // Use i32 to index structs, and i64 for others (pointers/arrays).
              // This satisfies GEP constraints.
              Type *IdxTy = (ElTy->isStructTy() ?
                    Type::getInt32Ty(F->getContext()) :
                    Type::getInt64Ty(F->getContext()));
              Ops.push_back(ConstantInt::get(IdxTy, *II));
              // Keep track of the type we're currently indexing.
              ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(*II);
            }
            // And create a GEP to extract those indices.
            V = GetElementPtrInst::Create(V, Ops, V->getName()+".idx", Call);
            Ops.clear();
            AA.copyValue(OrigLoad->getOperand(0), V);
          }
          // Since we're replacing a load make sure we take the alignment
          // of the previous load.
          LoadInst *newLoad = new LoadInst(V, V->getName()+".val", Call);
          newLoad->setAlignment(OrigLoad->getAlignment());
          // Transfer the TBAA info too.
          newLoad->setMetadata(LLVMContext::MD_tbaa,
                               OrigLoad->getMetadata(LLVMContext::MD_tbaa));
          Args.push_back(newLoad);
          AA.copyValue(OrigLoad, Args.back());
        }
      }

    // Push any varargs arguments on the list.
    for (; AI != CS.arg_end(); ++AI, ++ArgIndex) {
      Args.push_back(*AI);
      if (CallPAL.hasAttributes(ArgIndex)) {
        AttrBuilder B(CallPAL, ArgIndex);
        AttributesVec.
          push_back(AttributeSet::get(F->getContext(), Args.size(), B));
      }
    }

    // Add any function attributes.
    if (CallPAL.hasAttributes(AttributeSet::FunctionIndex))
      AttributesVec.push_back(AttributeSet::get(Call->getContext(),
                                                CallPAL.getFnAttributes()));

    // Build the replacement call/invoke with the rewritten argument list,
    // preserving calling convention, attributes, and the tail-call flag.
    Instruction *New;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                               Args, "", Call);
      cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
      cast<InvokeInst>(New)->setAttributes(AttributeSet::get(II->getContext(),
                                                            AttributesVec));
    } else {
      New = CallInst::Create(NF, Args, "", Call);
      cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
      cast<CallInst>(New)->setAttributes(AttributeSet::get(New->getContext(),
                                                          AttributesVec));
      if (cast<CallInst>(Call)->isTailCall())
        cast<CallInst>(New)->setTailCall();
    }
    Args.clear();
    AttributesVec.clear();

    // Update the alias analysis implementation to know that we are replacing
    // the old call with a new one.
    AA.replaceWithNewValue(Call, New);

    // Update the callgraph to know that the callsite has been transformed.
    CallGraphNode *CalleeNode = CG[Call->getParent()->getParent()];
    CalleeNode->replaceCallEdge(Call, New, NF_CGN);

    if (!Call->use_empty()) {
      Call->replaceAllUsesWith(New);
      New->takeName(Call);
    }

    // Finally, remove the old call from the program, reducing the use-count of
    // F.
    Call->eraseFromParent();
  }

  // Since we have now created the new function, splice the body of the old
  // function right into the new function, leaving the old rotting hulk of the
  // function empty.
  NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());

  // Loop over the argument list, transferring uses of the old arguments over to
  // the new arguments, also transferring over the names as well.
  //
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
       I2 = NF->arg_begin(); I != E; ++I) {
    if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) {
      // If this is an unmodified argument, move the name and users over to the
      // new version.
      I->replaceAllUsesWith(I2);
      I2->takeName(I);
      AA.replaceWithNewValue(I, I2);
      ++I2;
      continue;
    }

    if (ByValArgsToTransform.count(I)) {
      // In the callee, we create an alloca, and store each of the new incoming
      // arguments into the alloca. All new instructions go before the first
      // instruction of the (spliced) entry block.
      Instruction *InsertPt = NF->begin()->begin();

      // Just add all the struct element types.
      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      Value *TheAlloca = new AllocaInst(AgTy, nullptr, "", InsertPt);
      StructType *STy = cast<StructType>(AgTy);
      Value *Idxs[2] = {
            ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr };

      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
        Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
        Value *Idx =
          GetElementPtrInst::Create(TheAlloca, Idxs,
                                    TheAlloca->getName()+"."+Twine(i),
                                    InsertPt);
        // Note: I2 is advanced here, consuming one new argument per element.
        I2->setName(I->getName()+"."+Twine(i));
        new StoreInst(I2++, Idx, InsertPt);
      }

      // Anything that used the arg should now use the alloca.
      I->replaceAllUsesWith(TheAlloca);
      TheAlloca->takeName(I);
      AA.replaceWithNewValue(I, TheAlloca);

      // If the alloca is used in a call, we must clear the tail flag since
      // the callee now uses an alloca from the caller.
      for (User *U : TheAlloca->users()) {
        CallInst *Call = dyn_cast<CallInst>(U);
        if (!Call)
          continue;
        Call->setTailCall(false);
      }
      continue;
    }

    if (I->use_empty()) {
      // Dead promoted argument: no replacement value exists, just tell AA.
      AA.deleteValue(I);
      continue;
    }

    // Otherwise, if we promoted this argument, then all users are load
    // instructions (or GEPs with only load users), and all loads should be
    // using the new argument that we added.
    ScalarizeTable &ArgIndices = ScalarizedElements[I];

    while (!I->use_empty()) {
      if (LoadInst *LI = dyn_cast<LoadInst>(I->user_back())) {
        assert(ArgIndices.begin()->empty() &&
               "Load element should sort to front!");
        I2->setName(I->getName()+".val");
        LI->replaceAllUsesWith(I2);
        AA.replaceWithNewValue(LI, I2);
        LI->eraseFromParent();
        DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
              << "' in function '" << F->getName() << "'\n");
      } else {
        GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back());
        IndicesVector Operands;
        Operands.reserve(GEP->getNumIndices());
        for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
             II != IE; ++II)
          Operands.push_back(cast<ConstantInt>(*II)->getSExtValue());

        // GEPs with a single 0 index can be merged with direct loads
        if (Operands.size() == 1 && Operands.front() == 0)
          Operands.clear();

        // Find the new argument corresponding to this index list: new
        // arguments were appended in ArgIndices (set) order, so walk both in
        // lock-step until the index lists match.
        Function::arg_iterator TheArg = I2;
        for (ScalarizeTable::iterator It = ArgIndices.begin();
             *It != Operands; ++It, ++TheArg) {
          assert(It != ArgIndices.end() && "GEP not handled??");
        }

        std::string NewName = I->getName();
        for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
            NewName += "." + utostr(Operands[i]);
        }
        NewName += ".val";
        TheArg->setName(NewName);

        DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName()
              << "' of function '" << NF->getName() << "'\n");

        // All of the uses must be load instructions. Replace them all with
        // the argument specified by ArgNo.
        while (!GEP->use_empty()) {
          LoadInst *L = cast<LoadInst>(GEP->user_back());
          L->replaceAllUsesWith(TheArg);
          AA.replaceWithNewValue(L, TheArg);
          L->eraseFromParent();
        }
        AA.deleteValue(GEP);
        GEP->eraseFromParent();
      }
    }

    // Increment I2 past all of the arguments added for this promoted pointer.
    std::advance(I2, ArgIndices.size());
  }

  // Tell the alias analysis that the old function is about to disappear.
  AA.replaceWithNewValue(F, NF);


  NF_CGN->stealCalledFunctionsFrom(CG[F]);

  // Now that the old function is dead, delete it. If there is a dangling
  // reference to the CallgraphNode, just leave the dead function around for
  // someone else to nuke.
  CallGraphNode *CGN = CG[F];
  if (CGN->getNumReferences() == 0)
    delete CG.removeFunctionFromModule(CGN);
  else
    F->setLinkage(Function::ExternalLinkage);

  return NF_CGN;
}