GlobalOpt.cpp revision 9c9f10e3c7667d9104b1ed98090cf0c84e90f8e0
//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "globalopt"
#include "llvm/Transforms/IPO.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute, "Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumFnDeleted , "Number of functions deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");

namespace {
  struct VISIBILITY_HIDDEN GlobalOpt : public ModulePass {
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<TargetData>();
    }
    static char ID; // Pass identification, replacement for typeid
    GlobalOpt() : ModulePass(&ID) {}

    bool runOnModule(Module &M);

  private:
    GlobalVariable *FindGlobalCtors(Module &M);
    bool OptimizeFunctions(Module &M);
    bool OptimizeGlobalVars(Module &M);
    bool OptimizeGlobalAliases(Module &M);
    bool OptimizeGlobalCtorsList(GlobalVariable *&GCL);
    bool ProcessInternalGlobal(GlobalVariable *GV, Module::global_iterator &GVI);
  };
}

char GlobalOpt::ID = 0;
static RegisterPass<GlobalOpt> X("globalopt", "Global Variable Optimizer");

ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }

namespace {

/// GlobalStatus - As we analyze each global, keep track of some information
/// about it.  If we find out that the address of the global is taken, none of
/// this info will be accurate.
struct VISIBILITY_HIDDEN GlobalStatus {
  /// isLoaded - True if the global is ever loaded.  If the global isn't ever
  /// loaded it can be deleted.
  bool isLoaded;

  /// StoredType - Keep track of what stores to the global look like.
  ///
  enum StoredType {
    /// NotStored - There is no store to this global.  It can thus be marked
    /// constant.
    NotStored,

    /// isInitializerStored - This global is stored to, but the only thing
    /// stored is the constant it was initialized with.  This is only tracked
    /// for scalar globals.
    isInitializerStored,

    /// isStoredOnce - This global is stored to, but only its initializer and
    /// one other value is ever stored to it.  If this global isStoredOnce, we
    /// track the value stored to it in StoredOnceValue below.  This is only
    /// tracked for scalar globals.
    isStoredOnce,

    /// isStored - This global is stored to by multiple values or something
    /// else that we cannot track.
    isStored
  } StoredType;

  /// StoredOnceValue - If only one value (besides the initializer constant) is
  /// ever stored to this global, keep track of what value it is.
  Value *StoredOnceValue;

  /// AccessingFunction/HasMultipleAccessingFunctions - These start out
  /// null/false.  When the first accessing function is noticed, it is
  /// recorded.  When a second different accessing function is noticed,
  /// HasMultipleAccessingFunctions is set to true.
  Function *AccessingFunction;
  bool HasMultipleAccessingFunctions;

  /// HasNonInstructionUser - Set to true if this global has a user that is not
  /// an instruction (e.g. a constant expr or GV initializer).
  bool HasNonInstructionUser;

  /// HasPHIUser - Set to true if this global has a user that is a PHI node.
  bool HasPHIUser;

  GlobalStatus() : isLoaded(false), StoredType(NotStored), StoredOnceValue(0),
                   AccessingFunction(0), HasMultipleAccessingFunctions(false),
                   HasNonInstructionUser(false), HasPHIUser(false) {}
};

}

// SafeToDestroyConstant - It is safe to destroy a constant iff it is itself
// only used by constants.  Note that constants cannot be cyclic, so this test
// is pretty easy to implement recursively.
//
static bool SafeToDestroyConstant(Constant *C) {
  if (isa<GlobalValue>(C)) return false;

  for (Value::use_iterator UI = C->use_begin(), E = C->use_end(); UI != E; ++UI)
    if (Constant *CU = dyn_cast<Constant>(*UI)) {
      if (!SafeToDestroyConstant(CU)) return false;
    } else
      return false;
  return true;
}


/// AnalyzeGlobal - Look at all uses of the global and fill in the GlobalStatus
/// structure.  If the global has its address taken, return true to indicate we
/// can't do anything with it.
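///
/// For illustration, consider a hypothetical global and the status its uses
/// would produce:
///
///   @G = internal global i32 0
///   store i32 42, i32* @G        ; isStoredOnce, StoredOnceValue = i32 42
///   %v = load i32* @G            ; isLoaded = true
///
/// whereas "store i32* @G, i32** %p" passes the address OF @G somewhere we
/// cannot track, so the analysis gives up and returns true.
///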
static bool AnalyzeGlobal(Value *V, GlobalStatus &GS,
                          SmallPtrSet<PHINode*, 16> &PHIUsers) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(*UI)) {
      GS.HasNonInstructionUser = true;

      if (AnalyzeGlobal(CE, GS, PHIUsers)) return true;

    } else if (Instruction *I = dyn_cast<Instruction>(*UI)) {
      if (!GS.HasMultipleAccessingFunctions) {
        Function *F = I->getParent()->getParent();
        if (GS.AccessingFunction == 0)
          GS.AccessingFunction = F;
        else if (GS.AccessingFunction != F)
          GS.HasMultipleAccessingFunctions = true;
      }
      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        GS.isLoaded = true;
        if (LI->isVolatile()) return true;  // Don't hack on volatile loads.
      } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        // Don't allow a store OF the address, only stores TO the address.
        if (SI->getOperand(0) == V) return true;

        if (SI->isVolatile()) return true;  // Don't hack on volatile stores.

        // If this is a direct store to the global (i.e., the global is a
        // scalar value, not an aggregate), keep more specific information
        // about stores.
        if (GS.StoredType != GlobalStatus::isStored) {
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(SI->getOperand(1))){
            Value *StoredVal = SI->getOperand(0);
            if (StoredVal == GV->getInitializer()) {
              if (GS.StoredType < GlobalStatus::isInitializerStored)
                GS.StoredType = GlobalStatus::isInitializerStored;
            } else if (isa<LoadInst>(StoredVal) &&
                       cast<LoadInst>(StoredVal)->getOperand(0) == GV) {
              // G = G
              if (GS.StoredType < GlobalStatus::isInitializerStored)
                GS.StoredType = GlobalStatus::isInitializerStored;
            } else if (GS.StoredType < GlobalStatus::isStoredOnce) {
              GS.StoredType = GlobalStatus::isStoredOnce;
              GS.StoredOnceValue = StoredVal;
            } else if (GS.StoredType == GlobalStatus::isStoredOnce &&
                       GS.StoredOnceValue == StoredVal) {
              // noop.
            } else {
              GS.StoredType = GlobalStatus::isStored;
            }
          } else {
            GS.StoredType = GlobalStatus::isStored;
          }
        }
      } else if (isa<GetElementPtrInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (isa<SelectInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (PHINode *PN = dyn_cast<PHINode>(I)) {
        // PHI nodes we can check just like select or GEP instructions, but we
        // have to be careful about infinite recursion.
        if (PHIUsers.insert(PN))  // Not already visited.
          if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
        GS.HasPHIUser = true;
      } else if (isa<CmpInst>(I)) {
      } else if (isa<MemTransferInst>(I)) {
        if (I->getOperand(1) == V)
          GS.StoredType = GlobalStatus::isStored;
        if (I->getOperand(2) == V)
          GS.isLoaded = true;
      } else if (isa<MemSetInst>(I)) {
        assert(I->getOperand(1) == V && "Memset only takes one pointer!");
        GS.StoredType = GlobalStatus::isStored;
      } else {
        return true;  // Any other non-load instruction might take address!
      }
    } else if (Constant *C = dyn_cast<Constant>(*UI)) {
      GS.HasNonInstructionUser = true;
      // We might have a dead and dangling constant hanging off of here.
      if (!SafeToDestroyConstant(C))
        return true;
    } else {
      GS.HasNonInstructionUser = true;
      // Otherwise must be some other user.
      return true;
    }

  return false;
}
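
/// getAggregateConstantElement - Return the element of constant aggregate Agg
/// at constant index Idx; for example, element 1 of the struct constant
/// "{ i32 4, float 2.0 }" is "float 2.0".  Returns null if Idx is not a usable
/// constant index or Agg is not a form of aggregate we understand.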
static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx,
                                             LLVMContext &Context) {
  ConstantInt *CI = dyn_cast<ConstantInt>(Idx);
  if (!CI) return 0;
  unsigned IdxV = CI->getZExtValue();

  if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Agg)) {
    if (IdxV < CS->getNumOperands()) return CS->getOperand(IdxV);
  } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Agg)) {
    if (IdxV < CA->getNumOperands()) return CA->getOperand(IdxV);
  } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Agg)) {
    if (IdxV < CP->getNumOperands()) return CP->getOperand(IdxV);
  } else if (isa<ConstantAggregateZero>(Agg)) {
    if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) {
      if (IdxV < STy->getNumElements())
        return Context.getNullValue(STy->getElementType(IdxV));
    } else if (const SequentialType *STy =
               dyn_cast<SequentialType>(Agg->getType())) {
      return Context.getNullValue(STy->getElementType());
    }
  } else if (isa<UndefValue>(Agg)) {
    if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) {
      if (IdxV < STy->getNumElements())
        return Context.getUndef(STy->getElementType(IdxV));
    } else if (const SequentialType *STy =
               dyn_cast<SequentialType>(Agg->getType())) {
      return Context.getUndef(STy->getElementType());
    }
  }
  return 0;
}


/// CleanupConstantGlobalUsers - We just marked GV constant.  Loop over all
/// users of the global, cleaning up the obvious ones.  This is largely just a
/// quick scan over the use list to clean up the easy and obvious cruft.  This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
                                       LLVMContext &Context) {
  bool Changed = false;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
    User *U = *UI++;

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = 0;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE, Context);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, Context);
      } else if (CE->getOpcode() == Instruction::BitCast &&
                 isa<PointerType>(CE->getType())) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, 0, Context);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = 0;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE =
          dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, Context));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE, Context);
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, Context);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (SafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        CleanupConstantGlobalUsers(V, Init, Context);
        return true;
      }
    }
  }
  return Changed;
}

/// isSafeSROAElementUse - Return true if the specified instruction is a safe
/// user of a derived expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return SafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.
  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
  if (GEPI == 0) return false;

  if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
      !cast<Constant>(GEPI->getOperand(1))->isNullValue())
    return false;

  for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end();
       I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}


/// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value.
/// Look at it and its uses and decide whether it is safe to SROA this global.
///
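/// Concretely, for some hypothetical aggregate global @G, every direct user
/// must be a GEP of the form "getelementptr @G, i32 0, i32 C, ..." with C a
/// constant integer, so each use names exactly one element of the aggregate
/// and can later be redirected to that element's replacement global.
///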
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // value of C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for
  // sanity.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
    uint64_t NumElements = AT->getNumElements();
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that the index falls within the array.  If not,
    // something funny is going on, so we won't do the optimization.
    //
    if (Idx->getZExtValue() >= NumElements)
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants.  In particular, consider:
    // A[0][i].  We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI;  // Skip array index.
         GEPI != E && (isa<ArrayType>(*GEPI) || isa<VectorType>(*GEPI));
         ++GEPI) {
      uint64_t NumElements;
      if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
        NumElements = SubArrayTy->getNumElements();
      else
        NumElements = cast<VectorType>(*GEPI)->getNumElements();

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
        return false;
    }
  }

  for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}

/// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it
/// is safe for us to perform this transformation.
///
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI) {
    if (!IsUserOfGlobalSafeForSRA(*UI, GV))
      return false;
  }
  return true;
}


/// SRAGlobal - Perform scalar replacement of aggregates on the specified
/// global variable.  This opens the door for other optimizations by exposing
/// the behavior of the program in a more fine-grained way.  We have determined
/// that this transformation is safe already.  We return the first global
/// variable we insert so that the caller can reprocess it.
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD,
                                 LLVMContext &Context) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return 0;

  assert(GV->hasLocalLinkage() && !GV->isConstant());
  Constant *Init = GV->getInitializer();
  const Type *Ty = Init->getType();

  std::vector<GlobalVariable*> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = TD.getABITypeAlignment(GV->getType());

  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    NewGlobals.reserve(STy->getNumElements());
    const StructLayout &Layout = *TD.getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Constant *In = getAggregateConstantElement(Init,
                                        ConstantInt::get(Type::Int32Ty, i),
                                        Context);
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(Context,
                                               STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+utostr(i),
                                               GV->isThreadLocal(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);
    }
  } else if (const SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = 0;
    if (const ArrayType *ATy = dyn_cast<ArrayType>(STy))
      NumElements = ATy->getNumElements();
    else
      NumElements = cast<VectorType>(STy)->getNumElements();

    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return 0; // It's not worth it.
    NewGlobals.reserve(NumElements);

    uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
    unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = getAggregateConstantElement(Init,
                                        ConstantInt::get(Type::Int32Ty, i),
                                        Context);
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(Context,
                                               STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+utostr(i),
                                               GV->isThreadLocal(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
    }
  }

  if (NewGlobals.empty())
    return 0;

  DOUT << "PERFORMING GLOBAL SRA ON: " << *GV;

  Constant *NullInt = Context.getNullValue(Type::Int32Ty);

  // Loop over all of the uses of the global, replacing the constantexpr geps
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->use_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore the first index operand, which has to be zero or else the program
    // is quite broken (undefined).  Get the second operand, which is the
    // structure or array index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr = Context.getConstantExprGetElementPtr(cast<Constant>(NewPtr),
                                                      &Idxs[0], Idxs.size());
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(NewPtr, Idxs.begin(), Idxs.end(),
                                           GEPI->getName()+"."+utostr(Val),
                                           GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array deleting any globals that are obviously
  // dead.  This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0;
}

/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
/// value will trap if the value is dynamically null.  PHIs keeps track of any
/// phi nodes we've seen to avoid reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(Value *V,
                                         SmallPtrSet<PHINode*, 8> &PHIs) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
    if (isa<LoadInst>(*UI)) {
      // Will trap.
    } else if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << **UI;
        return false;  // Storing the value.
      }
    } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
      if (CI->getOperand(0) != V) {
        //cerr << "NONTRAPPING USE: " << **UI;
        return false;  // Not calling the ptr
      }
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
      if (II->getOperand(0) != V) {
        //cerr << "NONTRAPPING USE: " << **UI;
        return false;  // Not calling the ptr
      }
    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(*UI)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (PHINode *PN = dyn_cast<PHINode>(*UI)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN))
        return AllUsesOfValueWillTrapIfNull(PN, PHIs);
    } else if (isa<ICmpInst>(*UI) &&
               isa<ConstantPointerNull>(UI->getOperand(1))) {
      // Ignore setcc X, null
    } else {
      //cerr << "NONTRAPPING USE: " << **UI;
      return false;
    }
  return true;
}

/// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads
/// from GV will trap if the loaded value is null.  Note that this also permits
/// comparisons of the loaded value against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(GlobalVariable *GV) {
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI!=E; ++UI)
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
      SmallPtrSet<PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(*UI)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << **UI;
      return false;
    }

  return true;
}

static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
                                            LLVMContext &Context) {
  bool Changed = false;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      if (I->getOperand(0) == V) {
        // Calling through the pointer!  Turn into a direct call, but be
        // careful that the pointer is not also being passed as an argument.
        I->setOperand(0, NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i)
          if (I->getOperand(i) == V) {
            PassedAsArg = true;
            I->setOperand(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate
          // UI!
          UI = V->use_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                     Context.getConstantExprCast(CI->getOpcode(),
                                                 NewV, CI->getType()), Context);
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(GEPI,
                       Context.getConstantExprGetElementPtr(NewV, &Idxs[0],
                                                            Idxs.size()),
                       Context);
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}


/// OptimizeAwayTrappingUsesOfLoads - The specified global has only one
/// non-null value stored into it.  If there are uses of the loaded value that
/// would trap if the loaded value is dynamically null, then we know that those
/// uses cannot be reached with a null value, so we can optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
                                            LLVMContext &Context) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of the stored value.
  for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV, Context);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
             isa<ConstantExpr>(GlobalUser)) && "Only expect loads and stores!");
    }
  }

  if (Changed) {
    DOUT << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV;
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    DOUT << "  *** GLOBAL NOW DEAD!\n";
    CleanupConstantGlobalUsers(GV, 0, Context);
    if (GV->use_empty()) {
      GV->eraseFromParent();
      ++NumDeleted;
    }
    Changed = true;
  }
  return Changed;
}

/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
static void ConstantPropUsersOf(Value *V, LLVMContext &Context) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, Context)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        I->eraseFromParent();
      }
}

/// OptimizeGlobalAddressOfMalloc - This function takes the specified global
/// variable, and transforms the program as if it always contained the result
/// of the specified malloc.  Because it is always the result of the specified
/// malloc, there is no reason to actually DO the malloc.  Instead, turn the
/// malloc into a global, and rewrite any loads of GV as uses of the new
/// global.
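///
/// As a hypothetical sketch in C, the effect is roughly:
///
///   static int *G;                        static int G_body;     /* undef */
///   ...                              =>   static bool G_init = false;
///   G = malloc(sizeof(int));              G_init = true;
///   if (G) ... *G ...                     if (G_init) ... G_body ...
///
/// The boolean is only materialized if something compared the loaded pointer
/// against null; otherwise it is deleted again below.
///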
static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
                                                     MallocInst *MI,
                                                     LLVMContext &Context) {
  DOUT << "PROMOTING MALLOC GLOBAL: " << *GV << "  MALLOC = " << *MI;
  ConstantInt *NElements = cast<ConstantInt>(MI->getArraySize());

  if (NElements->getZExtValue() != 1) {
    // If we have an array allocation, transform it to a single element
    // allocation to make the code below simpler.
    Type *NewTy = Context.getArrayType(MI->getAllocatedType(),
                                       NElements->getZExtValue());
    MallocInst *NewMI =
      new MallocInst(NewTy, Context.getNullValue(Type::Int32Ty),
                     MI->getAlignment(), MI->getName(), MI);
    Value* Indices[2];
    Indices[0] = Indices[1] = Context.getNullValue(Type::Int32Ty);
    Value *NewGEP = GetElementPtrInst::Create(NewMI, Indices, Indices + 2,
                                              NewMI->getName()+".el0", MI);
    MI->replaceAllUsesWith(NewGEP);
    MI->eraseFromParent();
    MI = NewMI;
  }

  // Create the new global variable.  The contents of the malloc'd memory are
  // undefined, so initialize with an undef value.
  // FIXME: This new global should have the alignment returned by malloc.  Code
  // could depend on malloc returning large alignment (on the mac, 16 bytes)
  // but this would only guarantee some lower alignment.
  Constant *Init = Context.getUndef(MI->getAllocatedType());
  GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
                                             MI->getAllocatedType(), false,
                                             GlobalValue::InternalLinkage, Init,
                                             GV->getName()+".body",
                                             GV,
                                             GV->isThreadLocal());

  // Anything that used the malloc now uses the global directly.
  MI->replaceAllUsesWith(NewGV);

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getType()->getElementType())
    RepValue = Context.getConstantExprBitCast(RepValue,
                                              GV->getType()->getElementType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
    new GlobalVariable(Context, Type::Int1Ty, false,
                       GlobalValue::InternalLinkage,
                       Context.getFalse(), GV->getName()+".init",
                       GV->isThreadLocal());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  std::vector<StoreInst*> Stores;
  while (!GV->use_empty())
    if (LoadInst *LI = dyn_cast<LoadInst>(GV->use_back())) {
      while (!LI->use_empty()) {
        Use &LoadUse = LI->use_begin().getUse();
        if (!isa<ICmpInst>(LoadUse.getUser()))
          LoadUse = RepValue;
        else {
          ICmpInst *CI = cast<ICmpInst>(LoadUse.getUser());
          // Replace the cmp X, 0 with a use of the bool value.
          Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", CI);
          InitBoolUsed = true;
          switch (CI->getPredicate()) {
          default: llvm_unreachable("Unknown ICmp Predicate!");
          case ICmpInst::ICMP_ULT:
          case ICmpInst::ICMP_SLT:
            LV = Context.getFalse();   // X < null -> always false
            break;
          case ICmpInst::ICMP_ULE:
          case ICmpInst::ICMP_SLE:
          case ICmpInst::ICMP_EQ:
            LV = BinaryOperator::CreateNot(Context, LV, "notinit", CI);
            break;
          case ICmpInst::ICMP_NE:
          case ICmpInst::ICMP_UGE:
          case ICmpInst::ICMP_SGE:
          case ICmpInst::ICMP_UGT:
          case ICmpInst::ICMP_SGT:
            break;  // no change.
          }
          CI->replaceAllUsesWith(LV);
          CI->eraseFromParent();
        }
      }
      LI->eraseFromParent();
    } else {
      StoreInst *SI = cast<StoreInst>(GV->use_back());
      // The global is initialized when the store to it occurs.
      new StoreInst(Context.getTrue(), InitBool, SI);
      SI->eraseFromParent();
    }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<Instruction>(InitBool->use_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV, InitBool);


  // Now the GV is dead, nuke it and the malloc.
  GV->eraseFromParent();
  MI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on
  // it.
  ConstantPropUsersOf(NewGV, Context);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, Context);

  return NewGV;
}

/// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking
/// to make sure that there are no complex uses of V.  We permit simple things
/// like dereferencing the pointer, but not storing through the address, unless
/// it is to the specified global.
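///
/// For instance (hypothetically), given "%m = malloc ...", uses such as
/// "load %m", "icmp eq %m, null", or "store %m, @G" (for the given GV) are
/// fine, but "store %m, @H" for some other global or "call void @f(%m)" would
/// make this return false.
///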
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Instruction *V,
                                                      GlobalVariable *GV,
                                                    SmallPtrSet<PHINode*, 8> &PHIs) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
    Instruction *Inst = cast<Instruction>(*UI);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    if (isa<GetElementPtrInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok.  Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN))
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}

/// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV
/// somewhere.  Transform all uses of the allocation into loads from the
/// global and uses of the resultant pointer.  Further, delete the store into
/// GV.  This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->use_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}

/// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a
/// phi of a load) are simple enough to perform heap SRA on.  This permits
/// GEP's that index through the array and struct field, icmps of null, and
/// PHIs.
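///
/// That is, each use must (hypothetically) look like one of:
///
///   %f = getelementptr %ty* %V, i32 %i, i32 3  ; array index + field index
///   %c = icmp eq %ty* %V, null
///   %p = phi %ty* [ %V, ... ], ...             ; if the PHI's own uses qualify
///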
static bool LoadUsesSimpleEnoughForHeapSRA(Value *V,
                             SmallPtrSet<PHINode*, 32> &LoadUsingPHIs,
                             SmallPtrSet<PHINode*, 32> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
    Instruction *User = cast<Instruction>(*UI);

    // Comparison against null is ok.
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      if (!LoadUsingPHIsPerLoad.insert(PN))
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN))
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}


/// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded
/// from GV are simple enough to perform HeapSRA, return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(GlobalVariable *GV,
                                                    MallocInst *MI) {
  SmallPtrSet<PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<PHINode*, 32> LoadUsingPHIsPerLoad;
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;
       ++UI)
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform.  However, we don't
  // know that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or MI itself.
  for (SmallPtrSet<PHINode*, 32>::iterator I = LoadUsingPHIs.begin(),
       E = LoadUsingPHIs.end(); I != E; ++I) {
    PHINode *PN = *I;
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == MI) continue;

      if (PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // UNDEF? NULL?

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}
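
/// GetHeapSROAValue - Return the scalarized (per-field) counterpart of V for
/// field FieldNo, creating it on demand.  For a load of the global this is a
/// load of the corresponding field global; for a PHI it is a parallel PHI of
/// the field's pointer type, which is queued on PHIsToRewrite so its incoming
/// values can be filled in later.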
static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
                               LLVMContext &Context) {
  std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                           InsertedScalarizedValues,
                                           PHIsToRewrite, Context),
                          LI->getName()+".f" + utostr(FieldNo), LI);
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.
    const StructType *ST =
      cast<StructType>(cast<PointerType>(PN->getType())->getElementType());

    Result =
     PHINode::Create(Context.getPointerTypeUnqual(ST->getElementType(FieldNo)),
                     PN->getName()+".f"+utostr(FieldNo), PN);
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  } else {
    llvm_unreachable("Unknown usable value");
    Result = 0;
  }

  return FieldVals[FieldNo] = Result;
}

/// RewriteHeapSROALoadUser - Given a load instruction and a value derived from
/// the load, rewrite the derived value to use the HeapSRoA'd load.
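///
/// For example, a hypothetical use "getelementptr %ld, i32 %i, i32 2" becomes
/// "getelementptr %ld.f2, i32 %i", where %ld.f2 is the scalarized load of
/// field 2; an "icmp eq %ld, null" becomes a null compare of any one field's
/// pointer, since all field pointers are null exactly when the original
/// pointer was.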
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
                                    LLVMContext &Context) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite,
                                   Context);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Context.getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite,
                                     Context);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(NewPtr,
                                             GEPIdx.begin(), GEPIdx.end(),
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  bool Inserted;
  DenseMap<Value*, std::vector<Value*> >::iterator InsertPos;
  tie(InsertPos, Inserted) =
    InsertedScalarizedValues.insert(std::make_pair(PN, std::vector<Value*>()));
  if (!Inserted) return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite,
                            Context);
  }
}

/// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global.
/// Ptr is a value loaded from the global.  Eliminate all uses of Ptr, making
/// them use FieldGlobals instead.  All uses of loaded values satisfy
/// AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
                                         LLVMContext &Context) {
  for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
       UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite,
                            Context);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}

/// PerformHeapAllocSRoA - MI is an allocation of an array of structures.
/// Break it up into multiple allocations of arrays of the fields.
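///
/// Schematically (a hypothetical two-field case):
///
///   %p = malloc {i32, i32}, %n          %a = malloc i32, %n
///   store %p, @G                   =>   %b = malloc i32, %n
///                                       store %a, @G.f0
///                                       store %b, @G.f1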
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, MallocInst *MI,
                                            LLVMContext &Context){
  DOUT << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *MI;
  const StructType *STy = cast<StructType>(MI->getAllocatedType());

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(MI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as MI, and N globals.
  std::vector<Value*> FieldGlobals;
  std::vector<MallocInst*> FieldMallocs;

  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
    const Type *FieldTy = STy->getElementType(FieldNo);
    const Type *PFieldTy = Context.getPointerTypeUnqual(FieldTy);

    GlobalVariable *NGV =
      new GlobalVariable(*GV->getParent(),
                         PFieldTy, false, GlobalValue::InternalLinkage,
                         Context.getNullValue(PFieldTy),
                         GV->getName() + ".f" + utostr(FieldNo), GV,
                         GV->isThreadLocal());
    FieldGlobals.push_back(NGV);

    MallocInst *NMI = new MallocInst(FieldTy, MI->getArraySize(),
                                     MI->getName() + ".f" + utostr(FieldNo),MI);
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, MI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  Value *RunningOr = 0;
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(MI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Context.getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    if (!RunningOr)
      RunningOr = Cond;   // First seteq
    else
      RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", MI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = MI->getParent();
  BasicBlock *ContBB = OrigBB->splitBasicBlock(MI, "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at
  // the end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create("malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Context.getNullValue(GVVal->getType()),
                              "tmp");
    BasicBlock *FreeBlock = BasicBlock::Create("free_it", OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create("next", OrigBB->getParent());
    BranchInst::Create(FreeBlock, NextBlock, Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    new FreeInst(GVVal, FreeBlock);
    new StoreInst(Context.getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // MI is no longer needed, remove it.
  MI->eraseFromParent();

  /// InsertedScalarizedLoads - As we process loads, if we can't immediately
  /// update all uses of the load, keep track of what scalarized loads are
  /// inserted for a given load.
  DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;

  // Okay, the malloc site is completely handled.  All of the uses of GV are
  // now loads, and all uses of those loads are simple.  Rewrite them to use
  // loads of the per-field globals instead.
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite,
                                   Context);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      const PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
      Constant *Null = Context.getNullValue(PT->getElementType());
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");

    // Add all the incoming values.  This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite, Context);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  ++NumHeapSRA;
  return cast<GlobalVariable>(FieldGlobals[0]);
}

/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
/// pointer global variable with a single value stored into it that is a malloc
/// or cast of malloc.
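///
/// Depending on the allocation, it either promotes the malloc'd memory to a
/// global (see OptimizeGlobalAddressOfMalloc above) or splits an
/// array-of-struct allocation into per-field arrays (see PerformHeapAllocSRoA
/// above).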
static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
                                               MallocInst *MI,
                                               Module::global_iterator &GVI,
                                               TargetData &TD,
                                               LLVMContext &Context) {
  // If this is a malloc of an abstract type, don't touch it.
  if (!MI->getAllocatedType()->isSized())
    return false;

  // We can't optimize this global unless all uses of it are *known* to be
  // of the malloc value, not of the null initializer value (consider a use
  // that compares the global's value against zero to see if the malloc has
  // been reached).  To do this, we check to see if all uses of the global
  // would trap if the global were null: this proves that they must all
  // happen after the malloc.
  if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
    return false;

  // We can't optimize this if the malloc itself is used in a complex way,
  // for example, being stored into multiple globals.  This allows the
  // malloc to be stored into the specified global, loaded, setcc'd, and
  // GEP'd.  These are all things we can transform to use the global instead.
  {
    SmallPtrSet<PHINode*, 8> PHIs;
    if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(MI, GV, PHIs))
      return false;
  }


  // If we have a global that is only initialized with a fixed size malloc,
  // transform the program to use global memory instead of malloc'd memory.
  // This eliminates dynamic allocation, avoids an indirection accessing the
  // data, and exposes the resultant global to further GlobalOpt.
  if (ConstantInt *NElements = dyn_cast<ConstantInt>(MI->getArraySize())) {
    // Restrict this transformation to only working on small allocations
    // (2048 bytes currently), as we don't want to introduce a 16M global or
    // something.
    if (NElements->getZExtValue()*
        TD.getTypeAllocSize(MI->getAllocatedType()) < 2048) {
      GVI = OptimizeGlobalAddressOfMalloc(GV, MI, Context);
      return true;
    }
  }

  // If the allocation is an array of structures, consider transforming this
  // into multiple malloc'd arrays, one for each field.  This is basically
  // SRoA for malloc'd memory.
  const Type *AllocTy = MI->getAllocatedType();

  // If this is an allocation of a fixed size array of structs, analyze as a
  // variable size array.  malloc [100 x struct],1 -> malloc struct, 100
  if (!MI->isArrayAllocation())
    if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
      AllocTy = AT->getElementType();

  if (const StructType *AllocSTy = dyn_cast<StructType>(AllocTy)) {
    // If the structure has an unreasonable number of fields, leave it
    // alone.
    if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
        AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, MI)) {

      if (const ArrayType *AT = dyn_cast<ArrayType>(MI->getAllocatedType())) {
        MallocInst *NewMI =
          new MallocInst(AllocSTy,
                         ConstantInt::get(Type::Int32Ty, AT->getNumElements()),
                         "", MI);
        NewMI->takeName(MI);
        Value *Cast = new BitCastInst(NewMI, MI->getType(), "tmp", MI);
        MI->replaceAllUsesWith(Cast);
        MI->eraseFromParent();
        MI = NewMI;
      }

      GVI = PerformHeapAllocSRoA(GV, MI, Context);
      return true;
    }
  }

  return false;
}

// OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge
// that only one value (besides its initializer) is ever stored to the global.
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
                                     Module::global_iterator &GVI,
                                     TargetData &TD, LLVMContext &Context) {
  // Ignore no-op GEPs and bitcasts.
  StoredOnceVal = StoredOnceVal->stripPointerCasts();

  // If we are dealing with a pointer global that is initialized to null and
  // only has one (non-null) value stored into it, then we can optimize any
  // users of the loaded value (often calls and loads) that would trap if the
  // value was null.
  if (isa<PointerType>(GV->getInitializer()->getType()) &&
      GV->getInitializer()->isNullValue()) {
    if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
      if (GV->getInitializer()->getType() != SOVC->getType())
        SOVC =
          Context.getConstantExprBitCast(SOVC, GV->getInitializer()->getType());

      // Optimize away any trapping uses of the loaded value.
      if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, Context))
        return true;
    } else if (MallocInst *MI = dyn_cast<MallocInst>(StoredOnceVal)) {
      if (TryToOptimizeStoreOfMallocToGlobal(GV, MI, GVI, TD, Context))
        return true;
    }
  }

  return false;
}

/// TryToShrinkGlobalToBoolean - At this point, we have learned that the only
/// two values ever stored into GV are its initializer and OtherVal.  See if we
/// can shrink the global into a boolean and select between the two values
/// whenever it is used.  This exposes the values to other scalar optimizations.
static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal,
                                       LLVMContext &Context) {
  const Type *GVElType = GV->getType()->getElementType();

  // If GVElType is already i1, it is already shrunk.  If the type of the GV is
  // an FP value, pointer or vector, don't do this optimization because a
  // select between them is very expensive and unlikely to lead to later
  // simplification.  In these cases, we typically end up with "cond ? v1 : v2"
  // where v1 and v2 both require constant pool loads, a big loss.
  if (GVElType == Type::Int1Ty || GVElType->isFloatingPoint() ||
      isa<PointerType>(GVElType) || isa<VectorType>(GVElType))
    return false;

  // Walk the use list of the global to see if all the uses are load or store.
  // If there is anything else, bail out.
  for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I)
    if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
      return false;

  DOUT << "   *** SHRINKING TO BOOL: " << *GV;

  // Create the new global, initializing it to false.
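  // (The boolean global keeps the original name with a ".b" suffix and is
  // inserted immediately before GV in the module's global list.)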
  GlobalVariable *NewGV = new GlobalVariable(Context, Type::Int1Ty, false,
                                             GlobalValue::InternalLinkage,
                                             Context.getFalse(),
                                             GV->getName()+".b",
                                             GV->isThreadLocal());
  GV->getParent()->getGlobalList().insert(GV, NewGV);

  Constant *InitVal = GV->getInitializer();
  assert(InitVal->getType() != Type::Int1Ty && "No reason to shrink to bool!");

  // If initialized to zero and storing one into the global, we can use a cast
  // instead of a select to synthesize the desired value.
  bool IsOneZero = false;
  if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal))
    IsOneZero = InitVal->isNullValue() && CI->isOne();

  while (!GV->use_empty()) {
    Instruction *UI = cast<Instruction>(GV->use_back());
    if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      // Change the store into a boolean store.
      bool StoringOther = SI->getOperand(0) == OtherVal;
      // Only do this if we weren't storing a loaded value.
      Value *StoreVal;
      if (StoringOther || SI->getOperand(0) == InitVal)
        StoreVal = ConstantInt::get(Type::Int1Ty, StoringOther);
      else {
        // Otherwise, we are storing a previously loaded copy.  To do this,
        // change the copy from copying the original value to just copying the
        // bool.
        Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));

        // If we've already replaced the input, StoredVal will be a cast or
        // select instruction.  If not, it will be a load of the original
        // global.
        if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
          assert(LI->getOperand(0) == GV && "Not a copy!");
          // Insert a new load, to preserve the saved value.
          StoreVal = new LoadInst(NewGV, LI->getName()+".b", LI);
        } else {
          assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
                 "This is not a form that we understand!");
          StoreVal = StoredVal->getOperand(0);
          assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
        }
      }
      new StoreInst(StoreVal, NewGV, SI);
    } else {
      // Change the load into a load of bool then a select.
      LoadInst *LI = cast<LoadInst>(UI);
      LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", LI);
      Value *NSI;
      if (IsOneZero)
        NSI = new ZExtInst(NLI, LI->getType(), "", LI);
      else
        NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
      NSI->takeName(LI);
      LI->replaceAllUsesWith(NSI);
    }
    UI->eraseFromParent();
  }

  GV->eraseFromParent();
  return true;
}

/// ProcessInternalGlobal - Analyze the specified global variable and optimize
/// it if possible.  If we make a change, return true.
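/// As an illustrative example (hypothetical IR, not from any test case), a
/// global such as
///   @counter = internal global i32 0
/// that is loaded and stored only from within main() can be turned into an
/// alloca in main's entry block by the localization case below.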
bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
                                      Module::global_iterator &GVI) {
  SmallPtrSet<PHINode*, 16> PHIUsers;
  GlobalStatus GS;
  GV->removeDeadConstantUsers();

  if (GV->use_empty()) {
    DOUT << "GLOBAL DEAD: " << *GV;
    GV->eraseFromParent();
    ++NumDeleted;
    return true;
  }

  if (!AnalyzeGlobal(GV, GS, PHIUsers)) {
#if 0
    cerr << "Global: " << *GV;
    cerr << "  isLoaded = " << GS.isLoaded << "\n";
    cerr << "  StoredType = ";
    switch (GS.StoredType) {
    case GlobalStatus::NotStored: cerr << "NEVER STORED\n"; break;
    case GlobalStatus::isInitializerStored: cerr << "INIT STORED\n"; break;
    case GlobalStatus::isStoredOnce: cerr << "STORED ONCE\n"; break;
    case GlobalStatus::isStored: cerr << "stored\n"; break;
    }
    if (GS.StoredType == GlobalStatus::isStoredOnce && GS.StoredOnceValue)
      cerr << "  StoredOnceValue = " << *GS.StoredOnceValue << "\n";
    if (GS.AccessingFunction && !GS.HasMultipleAccessingFunctions)
      cerr << "  AccessingFunction = " << GS.AccessingFunction->getName()
           << "\n";
    cerr << "  HasMultipleAccessingFunctions = "
         << GS.HasMultipleAccessingFunctions << "\n";
    cerr << "  HasNonInstructionUser = " << GS.HasNonInstructionUser << "\n";
    cerr << "\n";
#endif

    // If this is a first class global and has only one accessing function
    // and this function is main (which we know is not recursive), we can
    // replace the global with a local alloca in this function.
    //
    // NOTE: It doesn't make sense to promote non-single-value types since we
    // are just replacing static memory with stack memory.
    //
    // If the global is in a different address space, don't bring it to the
    // stack.
    if (!GS.HasMultipleAccessingFunctions &&
        GS.AccessingFunction && !GS.HasNonInstructionUser &&
        GV->getType()->getElementType()->isSingleValueType() &&
        GS.AccessingFunction->getName() == "main" &&
        GS.AccessingFunction->hasExternalLinkage() &&
        GV->getType()->getAddressSpace() == 0) {
      DOUT << "LOCALIZING GLOBAL: " << *GV;
      Instruction* FirstI = GS.AccessingFunction->getEntryBlock().begin();
      const Type* ElemTy = GV->getType()->getElementType();
      // FIXME: Pass Global's alignment when globals have alignment
      AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), FirstI);
      if (!isa<UndefValue>(GV->getInitializer()))
        new StoreInst(GV->getInitializer(), Alloca, FirstI);

      GV->replaceAllUsesWith(Alloca);
      GV->eraseFromParent();
      ++NumLocalized;
      return true;
    }

    // If the global is never loaded (but may be stored to), it is dead.
    // Delete it now.
    if (!GS.isLoaded) {
      DOUT << "GLOBAL NEVER LOADED: " << *GV;

      // Delete any stores we can find to the global.  We may not be able to
      // make it completely dead though.
      bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(),
                                                GV->getContext());

      // If the global is dead now, delete it.
      if (GV->use_empty()) {
        GV->eraseFromParent();
        ++NumDeleted;
        Changed = true;
      }
      return Changed;

    } else if (GS.StoredType <= GlobalStatus::isInitializerStored) {
      DOUT << "MARKING CONSTANT: " << *GV;
      GV->setConstant(true);

      // Clean up any obviously simplifiable users now.
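      // (Once the global is marked constant, loads of it can be folded
      // directly to its initializer.)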
      CleanupConstantGlobalUsers(GV, GV->getInitializer(), GV->getContext());

      // If the global is dead now, just nuke it.
      if (GV->use_empty()) {
        DOUT << "   *** Marking constant allowed us to simplify "
             << "all users and delete global!\n";
        GV->eraseFromParent();
        ++NumDeleted;
      }

      ++NumMarked;
      return true;
    } else if (!GV->getInitializer()->getType()->isSingleValueType()) {
      if (GlobalVariable *FirstNewGV = SRAGlobal(GV,
                                                 getAnalysis<TargetData>(),
                                                 GV->getContext())) {
        GVI = FirstNewGV;  // Don't skip the newly produced globals!
        return true;
      }
    } else if (GS.StoredType == GlobalStatus::isStoredOnce) {
      // If the initial value for the global was an undef value, and if only
      // one other value was stored into it, we can just change the
      // initializer to be the stored value, then delete all stores to the
      // global.  This allows us to mark it constant.
      if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
        if (isa<UndefValue>(GV->getInitializer())) {
          // Change the initial value here.
          GV->setInitializer(SOVConstant);

          // Clean up any obviously simplifiable users now.
          CleanupConstantGlobalUsers(GV, GV->getInitializer(),
                                     GV->getContext());

          if (GV->use_empty()) {
            DOUT << "   *** Substituting initializer allowed us to "
                 << "simplify all users and delete global!\n";
            GV->eraseFromParent();
            ++NumDeleted;
          } else {
            GVI = GV;
          }
          ++NumSubstitute;
          return true;
        }

      // Try to optimize globals based on the knowledge that only one value
      // (besides its initializer) is ever stored to the global.
      if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GVI,
                                   getAnalysis<TargetData>(),
                                   GV->getContext()))
        return true;

      // Otherwise, if the global was not a boolean, we can shrink it to be a
      // boolean.
      if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
        if (TryToShrinkGlobalToBoolean(GV, SOVConstant, GV->getContext())) {
          ++NumShrunkToBool;
          return true;
        }
    }
  }
  return false;
}

/// ChangeCalleesToFastCall - Walk all of the direct calls of the specified
/// function, changing them to FastCC.
static void ChangeCalleesToFastCall(Function *F) {
  for (Value::use_iterator UI = F->use_begin(), E = F->use_end();
       UI != E; ++UI) {
    CallSite User(cast<Instruction>(*UI));
    User.setCallingConv(CallingConv::Fast);
  }
}

static AttrListPtr StripNest(const AttrListPtr &Attrs) {
  for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
    if ((Attrs.getSlot(i).Attrs & Attribute::Nest) == 0)
      continue;

    // There can be only one.
    return Attrs.removeAttr(Attrs.getSlot(i).Index, Attribute::Nest);
  }

  return Attrs;
}

static void RemoveNestAttribute(Function *F) {
  F->setAttributes(StripNest(F->getAttributes()));
  for (Value::use_iterator UI = F->use_begin(), E = F->use_end();
       UI != E; ++UI) {
    CallSite User(cast<Instruction>(*UI));
    User.setAttributes(StripNest(User.getAttributes()));
  }
}

bool GlobalOpt::OptimizeFunctions(Module &M) {
  bool Changed = false;
  // Optimize functions.
  for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
    Function *F = FI++;
    // Functions without names cannot be referenced outside this module.
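    // It is therefore safe to give such definitions internal linkage, which
    // enables the local-linkage optimizations below.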
    if (!F->hasName() && !F->isDeclaration())
      F->setLinkage(GlobalValue::InternalLinkage);
    F->removeDeadConstantUsers();
    if (F->use_empty() && (F->hasLocalLinkage() ||
                           F->hasLinkOnceLinkage())) {
      M.getFunctionList().erase(F);
      Changed = true;
      ++NumFnDeleted;
    } else if (F->hasLocalLinkage()) {
      if (F->getCallingConv() == CallingConv::C && !F->isVarArg() &&
          !F->hasAddressTaken()) {
        // If this function has C calling conventions, is not a varargs
        // function, and is only called directly, promote it to use the Fast
        // calling convention.
        F->setCallingConv(CallingConv::Fast);
        ChangeCalleesToFastCall(F);
        ++NumFastCallFns;
        Changed = true;
      }

      if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
          !F->hasAddressTaken()) {
        // The function is not used by a trampoline intrinsic, so it is safe
        // to remove the 'nest' attribute.
        RemoveNestAttribute(F);
        ++NumNestRemoved;
        Changed = true;
      }
    }
  }
  return Changed;
}

bool GlobalOpt::OptimizeGlobalVars(Module &M) {
  bool Changed = false;
  for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
       GVI != E; ) {
    GlobalVariable *GV = GVI++;
    // Global variables without names cannot be referenced outside this module.
    if (!GV->hasName() && !GV->isDeclaration())
      GV->setLinkage(GlobalValue::InternalLinkage);
    if (!GV->isConstant() && GV->hasLocalLinkage() &&
        GV->hasInitializer())
      Changed |= ProcessInternalGlobal(GV, GVI);
  }
  return Changed;
}

/// FindGlobalCtors - Find the llvm.global_ctors list, verifying that all
/// initializers have an init priority of 65535.
GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) {
  for (Module::global_iterator I = M.global_begin(), E = M.global_end();
       I != E; ++I)
    if (I->getName() == "llvm.global_ctors") {
      // Found it, verify it's an array of { i32, void()* }.
      const ArrayType *ATy =
        dyn_cast<ArrayType>(I->getType()->getElementType());
      if (!ATy) return 0;
      const StructType *STy = dyn_cast<StructType>(ATy->getElementType());
      if (!STy || STy->getNumElements() != 2 ||
          STy->getElementType(0) != Type::Int32Ty) return 0;
      const PointerType *PFTy = dyn_cast<PointerType>(STy->getElementType(1));
      if (!PFTy) return 0;
      const FunctionType *FTy = dyn_cast<FunctionType>(PFTy->getElementType());
      if (!FTy || FTy->getReturnType() != Type::VoidTy || FTy->isVarArg() ||
          FTy->getNumParams() != 0)
        return 0;

      // Verify that the initializer is simple enough for us to handle.
      if (!I->hasInitializer()) return 0;
      ConstantArray *CA = dyn_cast<ConstantArray>(I->getInitializer());
      if (!CA) return 0;
      for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i)
        if (ConstantStruct *CS = dyn_cast<ConstantStruct>(*i)) {
          if (isa<ConstantPointerNull>(CS->getOperand(1)))
            continue;

          // Must have a function or null ptr.
          if (!isa<Function>(CS->getOperand(1)))
            return 0;

          // Init priority must be standard.
          ConstantInt *CI = dyn_cast<ConstantInt>(CS->getOperand(0));
          if (!CI || CI->getZExtValue() != 65535)
            return 0;
        } else {
          return 0;
        }

      return I;
    }
  return 0;
}

/// ParseGlobalCtors - Given an llvm.global_ctors list that we can understand,
/// return a list of the functions and null terminator as a vector.
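/// For example (an illustrative initializer, not from any test case):
///   @llvm.global_ctors = appending global [2 x { i32, void ()* }]
///     [{ i32, void ()* } { i32 65535, void ()* @init },
///      { i32, void ()* } { i32 65535, void ()* null }]
/// would be returned as the vector [@init, null].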
static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) {
  ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
  std::vector<Function*> Result;
  Result.reserve(CA->getNumOperands());
  for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
    ConstantStruct *CS = cast<ConstantStruct>(*i);
    Result.push_back(dyn_cast<Function>(CS->getOperand(1)));
  }
  return Result;
}

/// InstallGlobalCtors - Install the specified constructor list into the
/// llvm.global_ctors global, returning the new global to use.
static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
                                          const std::vector<Function*> &Ctors,
                                          LLVMContext &Context) {
  // If we made a change, reassemble the initializer list.
  std::vector<Constant*> CSVals;
  CSVals.push_back(ConstantInt::get(Type::Int32Ty, 65535));
  CSVals.push_back(0);

  // Create the new init list.
  std::vector<Constant*> CAList;
  for (unsigned i = 0, e = Ctors.size(); i != e; ++i) {
    if (Ctors[i]) {
      CSVals[1] = Ctors[i];
    } else {
      const Type *FTy = Context.getFunctionType(Type::VoidTy, false);
      const PointerType *PFTy = Context.getPointerTypeUnqual(FTy);
      CSVals[1] = Context.getNullValue(PFTy);
      CSVals[0] = ConstantInt::get(Type::Int32Ty, 2147483647);
    }
    CAList.push_back(ConstantStruct::get(CSVals));
  }

  // Create the array initializer.
  const Type *StructTy =
    cast<ArrayType>(GCL->getType()->getElementType())->getElementType();
  Constant *CA = Context.getConstantArray(ArrayType::get(StructTy,
                                                         CAList.size()),
                                          CAList);

  // If we didn't change the number of elements, don't create a new GV.
  if (CA->getType() == GCL->getInitializer()->getType()) {
    GCL->setInitializer(CA);
    return GCL;
  }

  // Create the new global and insert it next to the existing list.
  GlobalVariable *NGV = new GlobalVariable(Context, CA->getType(),
                                           GCL->isConstant(),
                                           GCL->getLinkage(), CA, "",
                                           GCL->isThreadLocal());
  GCL->getParent()->getGlobalList().insert(GCL, NGV);
  NGV->takeName(GCL);

  // Nuke the old list, replacing any uses with the new one.
  if (!GCL->use_empty()) {
    Constant *V = NGV;
    if (V->getType() != GCL->getType())
      V = Context.getConstantExprBitCast(V, GCL->getType());
    GCL->replaceAllUsesWith(V);
  }
  GCL->eraseFromParent();

  if (Ctors.size())
    return NGV;
  else
    return 0;
}

static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues,
                        Value *V) {
  if (Constant *CV = dyn_cast<Constant>(V)) return CV;
  Constant *R = ComputedValues[V];
  assert(R && "Reference to an uncomputed value!");
  return R;
}

/// isSimpleEnoughPointerToCommit - Return true if this constant is simple
/// enough for us to understand.  In particular, if it is a cast of something,
/// we punt.  We basically just support direct accesses to globals and GEPs of
/// globals.  This should be kept up to date with CommitValueTo.
static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext &Context) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
    if (!GV->hasExternalLinkage() && !GV->hasLocalLinkage())
      return false;  // do not allow weak/linkonce/dllimport/dllexport linkage.
    return !GV->isDeclaration();  // reject external globals.
  }
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    // Handle a constantexpr gep.
    if (CE->getOpcode() == Instruction::GetElementPtr &&
        isa<GlobalVariable>(CE->getOperand(0))) {
      GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
      if (!GV->hasExternalLinkage() && !GV->hasLocalLinkage())
        return false;  // do not allow weak/linkonce/dllimport/dllexport linkage.
      return GV->hasInitializer() &&
             ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE,
                                                    Context);
    }
  return false;
}

/// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global
/// initializer.  This returns 'Init' modified to reflect 'Val' stored into it.
/// At this point, the GEP operands of Addr [0, OpNo) have been stepped into.
static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
                                   ConstantExpr *Addr, unsigned OpNo,
                                   LLVMContext &Context) {
  // Base case of the recursion.
  if (OpNo == Addr->getNumOperands()) {
    assert(Val->getType() == Init->getType() && "Type mismatch!");
    return Val;
  }

  if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
    std::vector<Constant*> Elts;

    // Break up the constant into its elements.
    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
      for (User::op_iterator i = CS->op_begin(), e = CS->op_end(); i != e; ++i)
        Elts.push_back(cast<Constant>(*i));
    } else if (isa<ConstantAggregateZero>(Init)) {
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        Elts.push_back(Context.getNullValue(STy->getElementType(i)));
    } else if (isa<UndefValue>(Init)) {
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        Elts.push_back(Context.getUndef(STy->getElementType(i)));
    } else {
      llvm_unreachable("This code is out of sync with "
                       "ConstantFoldLoadThroughGEPConstantExpr");
    }

    // Replace the element that we are supposed to.
    ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
    unsigned Idx = CU->getZExtValue();
    assert(Idx < STy->getNumElements() && "Struct index out of range!");
    Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1, Context);

    // Return the modified struct.
    return ConstantStruct::get(&Elts[0], Elts.size(), STy->isPacked());
  } else {
    ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
    const ArrayType *ATy = cast<ArrayType>(Init->getType());

    // Break up the array into elements.
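    // (For instance, a [4 x i32] zeroinitializer is expanded here into four
    // i32 0 elements before the indexed element is replaced below.)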
    std::vector<Constant*> Elts;
    if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
      for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i)
        Elts.push_back(cast<Constant>(*i));
    } else if (isa<ConstantAggregateZero>(Init)) {
      Constant *Elt = Context.getNullValue(ATy->getElementType());
      Elts.assign(ATy->getNumElements(), Elt);
    } else if (isa<UndefValue>(Init)) {
      Constant *Elt = Context.getUndef(ATy->getElementType());
      Elts.assign(ATy->getNumElements(), Elt);
    } else {
      llvm_unreachable("This code is out of sync with "
                       "ConstantFoldLoadThroughGEPConstantExpr");
    }

    assert(CI->getZExtValue() < ATy->getNumElements());
    Elts[CI->getZExtValue()] =
      EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1, Context);
    return Context.getConstantArray(ATy, Elts);
  }
}

/// CommitValueTo - We have decided that Addr (which satisfies the predicate
/// isSimpleEnoughPointerToCommit) should get Val as its value.  Make it happen.
static void CommitValueTo(Constant *Val, Constant *Addr,
                          LLVMContext &Context) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    assert(GV->hasInitializer());
    GV->setInitializer(Val);
    return;
  }

  ConstantExpr *CE = cast<ConstantExpr>(Addr);
  GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));

  Constant *Init = GV->getInitializer();
  Init = EvaluateStoreInto(Init, Val, CE, 2, Context);
  GV->setInitializer(Init);
}

/// ComputeLoadResult - Return the value that would be computed by a load from
/// P after the stores reflected by 'Memory' have been performed.  If we can't
/// decide, return null.
static Constant *ComputeLoadResult(Constant *P,
                                   const DenseMap<Constant*, Constant*> &Memory,
                                   LLVMContext &Context) {
  // If this memory location has been recently stored, use the stored value: it
  // is the most up-to-date.
  DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P);
  if (I != Memory.end()) return I->second;

  // Access it.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
    if (GV->hasInitializer())
      return GV->getInitializer();
    return 0;
  }

  // Handle a constantexpr getelementptr.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P))
    if (CE->getOpcode() == Instruction::GetElementPtr &&
        isa<GlobalVariable>(CE->getOperand(0))) {
      GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
      if (GV->hasInitializer())
        return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE,
                                                      Context);
    }

  return 0;  // don't know how to evaluate.
}

/// EvaluateFunction - Evaluate a call to function F, returning true if
/// successful, false if we can't evaluate it.  ActualArgs contains the actual
/// argument values for the call.
static bool EvaluateFunction(Function *F, Constant *&RetVal,
                             const std::vector<Constant*> &ActualArgs,
                             std::vector<Function*> &CallStack,
                             DenseMap<Constant*, Constant*> &MutatedMemory,
                             std::vector<GlobalVariable*> &AllocaTmps) {
  // Check to see if this function is already executing (recursion).  If so,
  // bail out.  TODO: we might want to accept limited recursion.
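  // (CallStack contains every function currently being evaluated, so finding
  // F in it means this call is recursive.)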
  if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
    return false;

  LLVMContext &Context = F->getContext();

  CallStack.push_back(F);

  /// Values - As we compute SSA register values, we store their contents here.
  DenseMap<Value*, Constant*> Values;

  // Initialize arguments to the incoming values specified.
  unsigned ArgNo = 0;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
       ++AI, ++ArgNo)
    Values[AI] = ActualArgs[ArgNo];

  /// ExecutedBlocks - We only handle non-looping, non-recursive code.  As
  /// such, we can only evaluate any one basic block at most once.  This set
  /// keeps track of what we have executed so we can detect recursive cases
  /// etc.
  SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;

  // CurInst - The current instruction we're evaluating.
  BasicBlock::iterator CurInst = F->begin()->begin();

  // This is the main evaluation loop.
  while (1) {
    Constant *InstResult = 0;

    if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
      if (SI->isVolatile()) return false;  // no volatile accesses.
      Constant *Ptr = getVal(Values, SI->getOperand(1));
      if (!isSimpleEnoughPointerToCommit(Ptr, Context))
        // If this is too complex for us to commit, reject it.
        return false;
      Constant *Val = getVal(Values, SI->getOperand(0));
      MutatedMemory[Ptr] = Val;
    } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
      InstResult = Context.getConstantExpr(BO->getOpcode(),
                                           getVal(Values, BO->getOperand(0)),
                                           getVal(Values, BO->getOperand(1)));
    } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
      InstResult =
        Context.getConstantExprCompare(CI->getPredicate(),
                                       getVal(Values, CI->getOperand(0)),
                                       getVal(Values, CI->getOperand(1)));
    } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
      InstResult = Context.getConstantExprCast(CI->getOpcode(),
                                               getVal(Values, CI->getOperand(0)),
                                               CI->getType());
    } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
      InstResult =
        Context.getConstantExprSelect(getVal(Values, SI->getOperand(0)),
                                      getVal(Values, SI->getOperand(1)),
                                      getVal(Values, SI->getOperand(2)));
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
      Constant *P = getVal(Values, GEP->getOperand(0));
      SmallVector<Constant*, 8> GEPOps;
      for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
           i != e; ++i)
        GEPOps.push_back(getVal(Values, *i));
      InstResult =
        Context.getConstantExprGetElementPtr(P, &GEPOps[0], GEPOps.size());
    } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
      if (LI->isVolatile()) return false;  // no volatile accesses.
      InstResult = ComputeLoadResult(getVal(Values, LI->getOperand(0)),
                                     MutatedMemory, Context);
      if (InstResult == 0) return false;  // Could not evaluate load.
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
      if (AI->isArrayAllocation()) return false;  // Cannot handle array allocs.
      const Type *Ty = AI->getType()->getElementType();
      AllocaTmps.push_back(new GlobalVariable(Context, Ty, false,
                                              GlobalValue::InternalLinkage,
                                              Context.getUndef(Ty),
                                              AI->getName()));
      InstResult = AllocaTmps.back();
    } else if (CallInst *CI = dyn_cast<CallInst>(CurInst)) {

      // Debug info can safely be ignored here.
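      // (Calls to the llvm.dbg.* intrinsics do not affect the memory state
      // being evaluated.)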
      if (isa<DbgInfoIntrinsic>(CI)) {
        ++CurInst;
        continue;
      }

      // Cannot handle inline asm.
      if (isa<InlineAsm>(CI->getOperand(0))) return false;

      // Resolve function pointers.
      Function *Callee = dyn_cast<Function>(getVal(Values, CI->getOperand(0)));
      if (!Callee) return false;  // Cannot resolve.

      std::vector<Constant*> Formals;
      for (User::op_iterator i = CI->op_begin() + 1, e = CI->op_end();
           i != e; ++i)
        Formals.push_back(getVal(Values, *i));

      if (Callee->isDeclaration()) {
        // If this is a function we can constant fold, do it.
        if (Constant *C = ConstantFoldCall(Callee, &Formals[0],
                                           Formals.size())) {
          InstResult = C;
        } else {
          return false;
        }
      } else {
        if (Callee->getFunctionType()->isVarArg())
          return false;

        Constant *RetVal;
        // Execute the call; if successful, use the return value.
        if (!EvaluateFunction(Callee, RetVal, Formals, CallStack,
                              MutatedMemory, AllocaTmps))
          return false;
        InstResult = RetVal;
      }
    } else if (isa<TerminatorInst>(CurInst)) {
      BasicBlock *NewBB = 0;
      if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
        if (BI->isUnconditional()) {
          NewBB = BI->getSuccessor(0);
        } else {
          ConstantInt *Cond =
            dyn_cast<ConstantInt>(getVal(Values, BI->getCondition()));
          if (!Cond) return false;  // Cannot determine.

          NewBB = BI->getSuccessor(!Cond->getZExtValue());
        }
      } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
        ConstantInt *Val =
          dyn_cast<ConstantInt>(getVal(Values, SI->getCondition()));
        if (!Val) return false;  // Cannot determine.
        NewBB = SI->getSuccessor(SI->findCaseValue(Val));
      } else if (ReturnInst *RI = dyn_cast<ReturnInst>(CurInst)) {
        if (RI->getNumOperands())
          RetVal = getVal(Values, RI->getOperand(0));

        CallStack.pop_back();  // return from fn.
        return true;  // We succeeded at evaluating this ctor!
      } else {
        // invoke, unwind, unreachable.
        return false;  // Cannot handle this terminator.
      }

      // Okay, we succeeded in evaluating this control flow.  See if we have
      // executed the new block before.  If so, we have a looping function,
      // which we cannot evaluate in reasonable time.
      if (!ExecutedBlocks.insert(NewBB))
        return false;  // looped!

      // Okay, we have never been in this block before.  Check to see if there
      // are any PHI nodes.  If so, evaluate them with information about where
      // we came from.
      BasicBlock *OldBB = CurInst->getParent();
      CurInst = NewBB->begin();
      PHINode *PN;
      for (; (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
        Values[PN] = getVal(Values, PN->getIncomingValueForBlock(OldBB));

      // Do NOT increment CurInst.  We know that the terminator had no value.
      continue;
    } else {
      // Did not know how to evaluate this!
      return false;
    }

    if (!CurInst->use_empty())
      Values[CurInst] = InstResult;

    // Advance program counter.
    ++CurInst;
  }
}

/// EvaluateStaticConstructor - Evaluate the given static constructor function,
/// if we can.  Return true if evaluation succeeded, false otherwise.
static bool EvaluateStaticConstructor(Function *F) {
  /// MutatedMemory - For each store we execute, we update this map.  Loads
  /// check this to get the most up-to-date value.  If evaluation is
  /// successful, this state is committed to the process.
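  /// (Illustrative: evaluating "store i32 1, i32* @g" records the mapping
  /// @g -> i32 1 here; CommitValueTo later rewrites @g's initializer.)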
  DenseMap<Constant*, Constant*> MutatedMemory;

  /// AllocaTmps - To 'execute' an alloca, we create a temporary global
  /// variable to represent its body.  This vector is needed so we can delete
  /// the temporary globals when we are done.
  std::vector<GlobalVariable*> AllocaTmps;

  /// CallStack - This is used to detect recursion.  In pathological
  /// situations we could hit exponential behavior, but at least there is
  /// nothing unbounded.
  std::vector<Function*> CallStack;

  // Call the function.
  Constant *RetValDummy;
  bool EvalSuccess = EvaluateFunction(F, RetValDummy, std::vector<Constant*>(),
                                      CallStack, MutatedMemory, AllocaTmps);
  if (EvalSuccess) {
    // We succeeded at evaluation: commit the result.
    DEBUG(errs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
                 << F->getName() << "' to " << MutatedMemory.size()
                 << " stores.\n");
    for (DenseMap<Constant*, Constant*>::iterator I = MutatedMemory.begin(),
         E = MutatedMemory.end(); I != E; ++I)
      CommitValueTo(I->second, I->first, F->getContext());
  }

  // At this point, we are done interpreting.  If we created any 'alloca'
  // temporaries, release them now.
  while (!AllocaTmps.empty()) {
    GlobalVariable *Tmp = AllocaTmps.back();
    AllocaTmps.pop_back();

    // If there are still users of the alloca, the program is doing something
    // silly, e.g. storing the address of the alloca somewhere and using it
    // later.  Since this is undefined, we'll just make it be null.
    if (!Tmp->use_empty())
      Tmp->replaceAllUsesWith(F->getContext().getNullValue(Tmp->getType()));
    delete Tmp;
  }

  return EvalSuccess;
}

/// OptimizeGlobalCtorsList - Simplify and evaluate global ctors if possible.
/// Return true if anything changed.
bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
  std::vector<Function*> Ctors = ParseGlobalCtors(GCL);
  bool MadeChange = false;
  if (Ctors.empty()) return false;

  // Loop over global ctors, optimizing them when we can.
  for (unsigned i = 0; i != Ctors.size(); ++i) {
    Function *F = Ctors[i];
    // Found a null terminator in the middle of the list, prune off the rest of
    // the list.
    if (F == 0) {
      if (i != Ctors.size()-1) {
        Ctors.resize(i+1);
        MadeChange = true;
      }
      break;
    }

    // We cannot simplify external ctor functions.
    if (F->empty()) continue;

    // If we can evaluate the ctor at compile time, do so.
    if (EvaluateStaticConstructor(F)) {
      Ctors.erase(Ctors.begin()+i);
      MadeChange = true;
      --i;
      ++NumCtorsEvaluated;
      continue;
    }
  }

  if (!MadeChange) return false;

  GCL = InstallGlobalCtors(GCL, Ctors, GCL->getContext());
  return true;
}

bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
  bool Changed = false;

  for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
       I != E;) {
    Module::alias_iterator J = I++;
    // Aliases without names cannot be referenced outside this module.
    if (!J->hasName() && !J->isDeclaration())
      J->setLinkage(GlobalValue::InternalLinkage);
    // If the aliasee may change at link time, nothing can be done - bail out.
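    // (For example, an alias with weak linkage may be overridden by a
    // definition in another translation unit.)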
    if (J->mayBeOverridden())
      continue;

    Constant *Aliasee = J->getAliasee();
    GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
    Target->removeDeadConstantUsers();
    bool hasOneUse = Target->hasOneUse() && Aliasee->hasOneUse();

    // Make all users of the alias use the aliasee instead.
    if (!J->use_empty()) {
      J->replaceAllUsesWith(Aliasee);
      ++NumAliasesResolved;
      Changed = true;
    }

    // If the aliasee has internal linkage, give it the name and linkage
    // of the alias, and delete the alias.  This turns:
    //   define internal ... @f(...)
    //   @a = alias ... @f
    // into:
    //   define ... @a(...)
    if (!Target->hasLocalLinkage())
      continue;

    // The transform is only useful if the alias does not have internal
    // linkage.
    if (J->hasLocalLinkage())
      continue;

    // Do not perform the transform if multiple aliases potentially target the
    // aliasee.  This check also ensures that it is safe to replace the section
    // and other attributes of the aliasee with those of the alias.
    if (!hasOneUse)
      continue;

    // Give the aliasee the name, linkage and other attributes of the alias.
    Target->takeName(J);
    Target->setLinkage(J->getLinkage());
    Target->GlobalValue::copyAttributesFrom(J);

    // Delete the alias.
    M.getAliasList().erase(J);
    ++NumAliasesRemoved;
    Changed = true;
  }

  return Changed;
}

bool GlobalOpt::runOnModule(Module &M) {
  bool Changed = false;

  // Try to find the llvm.global_ctors list.
  GlobalVariable *GlobalCtors = FindGlobalCtors(M);

  bool LocalChange = true;
  while (LocalChange) {
    LocalChange = false;

    // Delete functions that are trivially dead, ccc -> fastcc.
    LocalChange |= OptimizeFunctions(M);

    // Optimize global_ctors list.
    if (GlobalCtors)
      LocalChange |= OptimizeGlobalCtorsList(GlobalCtors);

    // Optimize non-address-taken globals.
    LocalChange |= OptimizeGlobalVars(M);

    // Resolve aliases, when possible.
    LocalChange |= OptimizeGlobalAliases(M);
    Changed |= LocalChange;
  }

  // TODO: Move all global ctors functions to the end of the module for code
  // layout.

  return Changed;
}
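// Usage sketch (illustrative, not part of the pass): GlobalOpt can be run
// directly on a bitcode file with the opt tool, e.g.:
//   opt -globalopt input.bc -o output.bc
// or programmatically, assuming a Module* M and the usual PassManager setup:
//   PassManager PM;
//   PM.add(new TargetData(M));           // TargetData is a required analysis
//   PM.add(createGlobalOptimizerPass());
//   PM.run(*M);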