GlobalOpt.cpp revision 18a2e50a9bfe4ecde57dc3913a7bd98b954ec81a
1//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This pass transforms simple global variables that never have their address 11// taken. If obviously true, it marks read/write globals as constant, deletes 12// variables only stored to, etc. 13// 14//===----------------------------------------------------------------------===// 15 16#define DEBUG_TYPE "globalopt" 17#include "llvm/Transforms/IPO.h" 18#include "llvm/CallingConv.h" 19#include "llvm/Constants.h" 20#include "llvm/DerivedTypes.h" 21#include "llvm/Instructions.h" 22#include "llvm/IntrinsicInst.h" 23#include "llvm/Module.h" 24#include "llvm/Pass.h" 25#include "llvm/Analysis/ConstantFolding.h" 26#include "llvm/Analysis/MemoryBuiltins.h" 27#include "llvm/Target/TargetData.h" 28#include "llvm/Support/CallSite.h" 29#include "llvm/Support/Debug.h" 30#include "llvm/Support/ErrorHandling.h" 31#include "llvm/Support/GetElementPtrTypeIterator.h" 32#include "llvm/Support/MathExtras.h" 33#include "llvm/Support/raw_ostream.h" 34#include "llvm/ADT/DenseMap.h" 35#include "llvm/ADT/SmallPtrSet.h" 36#include "llvm/ADT/SmallVector.h" 37#include "llvm/ADT/Statistic.h" 38#include "llvm/ADT/STLExtras.h" 39#include <algorithm> 40using namespace llvm; 41 42STATISTIC(NumMarked , "Number of globals marked constant"); 43STATISTIC(NumUnnamed , "Number of globals marked unnamed_addr"); 44STATISTIC(NumSRA , "Number of aggregate globals broken into scalars"); 45STATISTIC(NumHeapSRA , "Number of heap objects SRA'd"); 46STATISTIC(NumSubstitute,"Number of globals with initializers stored into them"); 47STATISTIC(NumDeleted , "Number of globals deleted"); 48STATISTIC(NumFnDeleted , "Number of functions deleted"); 49STATISTIC(NumGlobUses , "Number of global uses devirtualized"); 50STATISTIC(NumLocalized , "Number of globals localized"); 51STATISTIC(NumShrunkToBool , "Number of global vars shrunk to booleans"); 52STATISTIC(NumFastCallFns , "Number of functions converted to fastcc"); 53STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated"); 54STATISTIC(NumNestRemoved , "Number of nest attributes removed"); 55STATISTIC(NumAliasesResolved, "Number of global aliases resolved"); 56STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated"); 57STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed"); 58 59namespace { 60 struct GlobalStatus; 61 struct GlobalOpt : public ModulePass { 62 virtual void getAnalysisUsage(AnalysisUsage &AU) const { 63 } 64 static char ID; // Pass identification, replacement for typeid 65 GlobalOpt() : ModulePass(ID) { 66 initializeGlobalOptPass(*PassRegistry::getPassRegistry()); 67 } 68 69 bool runOnModule(Module &M); 70 71 private: 72 GlobalVariable *FindGlobalCtors(Module &M); 73 bool OptimizeFunctions(Module &M); 74 bool OptimizeGlobalVars(Module &M); 75 bool OptimizeGlobalAliases(Module &M); 76 bool OptimizeGlobalCtorsList(GlobalVariable *&GCL); 77 bool ProcessGlobal(GlobalVariable *GV,Module::global_iterator &GVI); 78 bool ProcessInternalGlobal(GlobalVariable *GV,Module::global_iterator &GVI, 79 const SmallPtrSet<const PHINode*, 16> &PHIUsers, 80 const GlobalStatus &GS); 81 bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn); 82 }; 83} 84 85char GlobalOpt::ID = 0; 86INITIALIZE_PASS(GlobalOpt, "globalopt", 87 "Global 
Variable Optimizer", false, false) 88 89ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); } 90 91namespace { 92 93/// GlobalStatus - As we analyze each global, keep track of some information 94/// about it. If we find out that the address of the global is taken, none of 95/// this info will be accurate. 96struct GlobalStatus { 97 /// isCompared - True if the global's address is used in a comparison. 98 bool isCompared; 99 100 /// isLoaded - True if the global is ever loaded. If the global isn't ever 101 /// loaded it can be deleted. 102 bool isLoaded; 103 104 /// StoredType - Keep track of what stores to the global look like. 105 /// 106 enum StoredType { 107 /// NotStored - There is no store to this global. It can thus be marked 108 /// constant. 109 NotStored, 110 111 /// isInitializerStored - This global is stored to, but the only thing 112 /// stored is the constant it was initialized with. This is only tracked 113 /// for scalar globals. 114 isInitializerStored, 115 116 /// isStoredOnce - This global is stored to, but only its initializer and 117 /// one other value is ever stored to it. If this global isStoredOnce, we 118 /// track the value stored to it in StoredOnceValue below. This is only 119 /// tracked for scalar globals. 120 isStoredOnce, 121 122 /// isStored - This global is stored to by multiple values or something else 123 /// that we cannot track. 124 isStored 125 } StoredType; 126 127 /// StoredOnceValue - If only one value (besides the initializer constant) is 128 /// ever stored to this global, keep track of what value it is. 129 Value *StoredOnceValue; 130 131 /// AccessingFunction/HasMultipleAccessingFunctions - These start out 132 /// null/false. When the first accessing function is noticed, it is recorded. 133 /// When a second different accessing function is noticed, 134 /// HasMultipleAccessingFunctions is set to true. 135 const Function *AccessingFunction; 136 bool HasMultipleAccessingFunctions; 137 138 /// HasNonInstructionUser - Set to true if this global has a user that is not 139 /// an instruction (e.g. a constant expr or GV initializer). 140 bool HasNonInstructionUser; 141 142 /// HasPHIUser - Set to true if this global has a user that is a PHI node. 143 bool HasPHIUser; 144 145 GlobalStatus() : isCompared(false), isLoaded(false), StoredType(NotStored), 146 StoredOnceValue(0), AccessingFunction(0), 147 HasMultipleAccessingFunctions(false), HasNonInstructionUser(false), 148 HasPHIUser(false) {} 149}; 150 151} 152 153// SafeToDestroyConstant - It is safe to destroy a constant iff it is only used 154// by constants itself. Note that constants cannot be cyclic, so this test is 155// pretty easy to implement recursively. 156// 157static bool SafeToDestroyConstant(const Constant *C) { 158 if (isa<GlobalValue>(C)) return false; 159 160 for (Value::const_use_iterator UI = C->use_begin(), E = C->use_end(); UI != E; 161 ++UI) 162 if (const Constant *CU = dyn_cast<Constant>(*UI)) { 163 if (!SafeToDestroyConstant(CU)) return false; 164 } else 165 return false; 166 return true; 167} 168 169 170/// AnalyzeGlobal - Look at all uses of the global and fill in the GlobalStatus 171/// structure. If the global has its address taken, return true to indicate we 172/// can't do anything with it. 
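/// Illustrative sketch (hypothetical C, names made up for exposition): the
/// distinction this analysis draws is roughly
///
///   static int G;
///   void f() { escape(&G); }     // address escapes: return true, give up
///   void g() { G = 1; use(G); }  // only loads/stores: GlobalStatus stays
///                                // precise and the optimizations below apply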
173/// 174static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS, 175 SmallPtrSet<const PHINode*, 16> &PHIUsers) { 176 for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; 177 ++UI) { 178 const User *U = *UI; 179 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) { 180 GS.HasNonInstructionUser = true; 181 182 // If the result of the constantexpr isn't pointer type, then we won't 183 // know to expect it in various places. Just reject early. 184 if (!isa<PointerType>(CE->getType())) return true; 185 186 if (AnalyzeGlobal(CE, GS, PHIUsers)) return true; 187 } else if (const Instruction *I = dyn_cast<Instruction>(U)) { 188 if (!GS.HasMultipleAccessingFunctions) { 189 const Function *F = I->getParent()->getParent(); 190 if (GS.AccessingFunction == 0) 191 GS.AccessingFunction = F; 192 else if (GS.AccessingFunction != F) 193 GS.HasMultipleAccessingFunctions = true; 194 } 195 if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { 196 GS.isLoaded = true; 197 if (LI->isVolatile()) return true; // Don't hack on volatile loads. 198 } else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) { 199 // Don't allow a store OF the address, only stores TO the address. 200 if (SI->getOperand(0) == V) return true; 201 202 if (SI->isVolatile()) return true; // Don't hack on volatile stores. 203 204 // If this is a direct store to the global (i.e., the global is a scalar 205 // value, not an aggregate), keep more specific information about 206 // stores. 207 if (GS.StoredType != GlobalStatus::isStored) { 208 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>( 209 SI->getOperand(1))) { 210 Value *StoredVal = SI->getOperand(0); 211 if (StoredVal == GV->getInitializer()) { 212 if (GS.StoredType < GlobalStatus::isInitializerStored) 213 GS.StoredType = GlobalStatus::isInitializerStored; 214 } else if (isa<LoadInst>(StoredVal) && 215 cast<LoadInst>(StoredVal)->getOperand(0) == GV) { 216 if (GS.StoredType < GlobalStatus::isInitializerStored) 217 GS.StoredType = GlobalStatus::isInitializerStored; 218 } else if (GS.StoredType < GlobalStatus::isStoredOnce) { 219 GS.StoredType = GlobalStatus::isStoredOnce; 220 GS.StoredOnceValue = StoredVal; 221 } else if (GS.StoredType == GlobalStatus::isStoredOnce && 222 GS.StoredOnceValue == StoredVal) { 223 // noop. 224 } else { 225 GS.StoredType = GlobalStatus::isStored; 226 } 227 } else { 228 GS.StoredType = GlobalStatus::isStored; 229 } 230 } 231 } else if (isa<GetElementPtrInst>(I)) { 232 if (AnalyzeGlobal(I, GS, PHIUsers)) return true; 233 } else if (isa<SelectInst>(I)) { 234 if (AnalyzeGlobal(I, GS, PHIUsers)) return true; 235 } else if (const PHINode *PN = dyn_cast<PHINode>(I)) { 236 // PHI nodes we can check just like select or GEP instructions, but we 237 // have to be careful about infinite recursion. 238 if (PHIUsers.insert(PN)) // Not already visited. 239 if (AnalyzeGlobal(I, GS, PHIUsers)) return true; 240 GS.HasPHIUser = true; 241 } else if (isa<CmpInst>(I)) { 242 GS.isCompared = true; 243 } else if (isa<MemTransferInst>(I)) { 244 const MemTransferInst *MTI = cast<MemTransferInst>(I); 245 if (MTI->getArgOperand(0) == V) 246 GS.StoredType = GlobalStatus::isStored; 247 if (MTI->getArgOperand(1) == V) 248 GS.isLoaded = true; 249 } else if (isa<MemSetInst>(I)) { 250 assert(cast<MemSetInst>(I)->getArgOperand(0) == V && 251 "Memset only takes one pointer!"); 252 GS.StoredType = GlobalStatus::isStored; 253 } else { 254 return true; // Any other non-load instruction might take address! 
255 } 256 } else if (const Constant *C = dyn_cast<Constant>(U)) { 257 GS.HasNonInstructionUser = true; 258 // We might have a dead and dangling constant hanging off of here. 259 if (!SafeToDestroyConstant(C)) 260 return true; 261 } else { 262 GS.HasNonInstructionUser = true; 263 // Otherwise must be some other user. 264 return true; 265 } 266 } 267 268 return false; 269} 270 271static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx) { 272 ConstantInt *CI = dyn_cast<ConstantInt>(Idx); 273 if (!CI) return 0; 274 unsigned IdxV = CI->getZExtValue(); 275 276 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Agg)) { 277 if (IdxV < CS->getNumOperands()) return CS->getOperand(IdxV); 278 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Agg)) { 279 if (IdxV < CA->getNumOperands()) return CA->getOperand(IdxV); 280 } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Agg)) { 281 if (IdxV < CP->getNumOperands()) return CP->getOperand(IdxV); 282 } else if (isa<ConstantAggregateZero>(Agg)) { 283 if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) { 284 if (IdxV < STy->getNumElements()) 285 return Constant::getNullValue(STy->getElementType(IdxV)); 286 } else if (const SequentialType *STy = 287 dyn_cast<SequentialType>(Agg->getType())) { 288 return Constant::getNullValue(STy->getElementType()); 289 } 290 } else if (isa<UndefValue>(Agg)) { 291 if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) { 292 if (IdxV < STy->getNumElements()) 293 return UndefValue::get(STy->getElementType(IdxV)); 294 } else if (const SequentialType *STy = 295 dyn_cast<SequentialType>(Agg->getType())) { 296 return UndefValue::get(STy->getElementType()); 297 } 298 } 299 return 0; 300} 301 302 303/// CleanupConstantGlobalUsers - We just marked GV constant. Loop over all 304/// users of the global, cleaning up the obvious ones. This is largely just a 305/// quick scan over the use list to clean up the easy and obvious cruft. This 306/// returns true if it made a change. 307static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) { 308 bool Changed = false; 309 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) { 310 User *U = *UI++; 311 312 if (LoadInst *LI = dyn_cast<LoadInst>(U)) { 313 if (Init) { 314 // Replace the load with the initializer. 315 LI->replaceAllUsesWith(Init); 316 LI->eraseFromParent(); 317 Changed = true; 318 } 319 } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 320 // Store must be unreachable or storing Init into the global. 321 SI->eraseFromParent(); 322 Changed = true; 323 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) { 324 if (CE->getOpcode() == Instruction::GetElementPtr) { 325 Constant *SubInit = 0; 326 if (Init) 327 SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE); 328 Changed |= CleanupConstantGlobalUsers(CE, SubInit); 329 } else if (CE->getOpcode() == Instruction::BitCast && 330 CE->getType()->isPointerTy()) { 331 // Pointer cast, delete any stores and memsets to the global. 332 Changed |= CleanupConstantGlobalUsers(CE, 0); 333 } 334 335 if (CE->use_empty()) { 336 CE->destroyConstant(); 337 Changed = true; 338 } 339 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { 340 // Do not transform "gepinst (gep constexpr (GV))" here, because forming 341 // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold 342 // and will invalidate our notion of what Init is. 
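    // (Illustrative note, hypothetical example: if the operand is already the
    // constantexpr "gep @GV, 0, 1" and this instruction were folded into
    // another gep constantexpr, the two would merge into one gep rooted at
    // @GV; but Init here describes the sub-object selected by the inner gep,
    // so the merged expression could no longer be interpreted against it.)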
343 Constant *SubInit = 0; 344 if (!isa<ConstantExpr>(GEP->getOperand(0))) { 345 ConstantExpr *CE = 346 dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP)); 347 if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr) 348 SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE); 349 } 350 Changed |= CleanupConstantGlobalUsers(GEP, SubInit); 351 352 if (GEP->use_empty()) { 353 GEP->eraseFromParent(); 354 Changed = true; 355 } 356 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv 357 if (MI->getRawDest() == V) { 358 MI->eraseFromParent(); 359 Changed = true; 360 } 361 362 } else if (Constant *C = dyn_cast<Constant>(U)) { 363 // If we have a chain of dead constantexprs or other things dangling from 364 // us, and if they are all dead, nuke them without remorse. 365 if (SafeToDestroyConstant(C)) { 366 C->destroyConstant(); 367 // This could have invalidated UI, start over from scratch. 368 CleanupConstantGlobalUsers(V, Init); 369 return true; 370 } 371 } 372 } 373 return Changed; 374} 375 376/// isSafeSROAElementUse - Return true if the specified instruction is a safe 377/// user of a derived expression from a global that we want to SROA. 378static bool isSafeSROAElementUse(Value *V) { 379 // We might have a dead and dangling constant hanging off of here. 380 if (Constant *C = dyn_cast<Constant>(V)) 381 return SafeToDestroyConstant(C); 382 383 Instruction *I = dyn_cast<Instruction>(V); 384 if (!I) return false; 385 386 // Loads are ok. 387 if (isa<LoadInst>(I)) return true; 388 389 // Stores *to* the pointer are ok. 390 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 391 return SI->getOperand(0) != V; 392 393 // Otherwise, it must be a GEP. 394 GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I); 395 if (GEPI == 0) return false; 396 397 if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) || 398 !cast<Constant>(GEPI->getOperand(1))->isNullValue()) 399 return false; 400 401 for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end(); 402 I != E; ++I) 403 if (!isSafeSROAElementUse(*I)) 404 return false; 405 return true; 406} 407 408 409/// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value. 410/// Look at it and its uses and decide whether it is safe to SROA this global. 411/// 412static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) { 413 // The user of the global must be a GEP Inst or a ConstantExpr GEP. 414 if (!isa<GetElementPtrInst>(U) && 415 (!isa<ConstantExpr>(U) || 416 cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr)) 417 return false; 418 419 // Check to see if this ConstantExpr GEP is SRA'able. In particular, we 420 // don't like < 3 operand CE's, and we don't like non-constant integer 421 // indices. This enforces that all uses are 'gep GV, 0, C, ...' for some 422 // value of C. 423 if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) || 424 !cast<Constant>(U->getOperand(1))->isNullValue() || 425 !isa<ConstantInt>(U->getOperand(2))) 426 return false; 427 428 gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U); 429 ++GEPI; // Skip over the pointer index. 430 431 // If this is a use of an array allocation, do a bit more checking for sanity. 432 if (const ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) { 433 uint64_t NumElements = AT->getNumElements(); 434 ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2)); 435 436 // Check to make sure that index falls within the array. 
If not, 437 // something funny is going on, so we won't do the optimization. 438 // 439 if (Idx->getZExtValue() >= NumElements) 440 return false; 441 442 // We cannot scalar repl this level of the array unless any array 443 // sub-indices are in-range constants. In particular, consider: 444 // A[0][i]. We cannot know that the user isn't doing invalid things like 445 // allowing i to index an out-of-range subscript that accesses A[1]. 446 // 447 // Scalar replacing *just* the outer index of the array is probably not 448 // going to be a win anyway, so just give up. 449 for (++GEPI; // Skip array index. 450 GEPI != E; 451 ++GEPI) { 452 uint64_t NumElements; 453 if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI)) 454 NumElements = SubArrayTy->getNumElements(); 455 else if (const VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI)) 456 NumElements = SubVectorTy->getNumElements(); 457 else { 458 assert((*GEPI)->isStructTy() && 459 "Indexed GEP type is not array, vector, or struct!"); 460 continue; 461 } 462 463 ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand()); 464 if (!IdxVal || IdxVal->getZExtValue() >= NumElements) 465 return false; 466 } 467 } 468 469 for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I) 470 if (!isSafeSROAElementUse(*I)) 471 return false; 472 return true; 473} 474 475/// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it 476/// is safe for us to perform this transformation. 477/// 478static bool GlobalUsersSafeToSRA(GlobalValue *GV) { 479 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); 480 UI != E; ++UI) { 481 if (!IsUserOfGlobalSafeForSRA(*UI, GV)) 482 return false; 483 } 484 return true; 485} 486 487 488/// SRAGlobal - Perform scalar replacement of aggregates on the specified global 489/// variable. This opens the door for other optimizations by exposing the 490/// behavior of the program in a more fine-grained way. We have determined that 491/// this transformation is safe already. We return the first global variable we 492/// insert so that the caller can reprocess it. 493static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) { 494 // Make sure this global only has simple uses that we can SRA. 495 if (!GlobalUsersSafeToSRA(GV)) 496 return 0; 497 498 assert(GV->hasLocalLinkage() && !GV->isConstant()); 499 Constant *Init = GV->getInitializer(); 500 const Type *Ty = Init->getType(); 501 502 std::vector<GlobalVariable*> NewGlobals; 503 Module::GlobalListType &Globals = GV->getParent()->getGlobalList(); 504 505 // Get the alignment of the global, either explicit or target-specific. 506 unsigned StartAlignment = GV->getAlignment(); 507 if (StartAlignment == 0) 508 StartAlignment = TD.getABITypeAlignment(GV->getType()); 509 510 if (const StructType *STy = dyn_cast<StructType>(Ty)) { 511 NewGlobals.reserve(STy->getNumElements()); 512 const StructLayout &Layout = *TD.getStructLayout(STy); 513 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 514 Constant *In = getAggregateConstantElement(Init, 515 ConstantInt::get(Type::getInt32Ty(STy->getContext()), i)); 516 assert(In && "Couldn't get element of initializer?"); 517 GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false, 518 GlobalVariable::InternalLinkage, 519 In, GV->getName()+"."+Twine(i), 520 GV->isThreadLocal(), 521 GV->getType()->getAddressSpace()); 522 Globals.insert(GV, NGV); 523 NewGlobals.push_back(NGV); 524 525 // Calculate the known alignment of the field. 
If the original aggregate 526 // had 256 byte alignment for example, something might depend on that: 527 // propagate info to each field. 528 uint64_t FieldOffset = Layout.getElementOffset(i); 529 unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset); 530 if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i))) 531 NGV->setAlignment(NewAlign); 532 } 533 } else if (const SequentialType *STy = dyn_cast<SequentialType>(Ty)) { 534 unsigned NumElements = 0; 535 if (const ArrayType *ATy = dyn_cast<ArrayType>(STy)) 536 NumElements = ATy->getNumElements(); 537 else 538 NumElements = cast<VectorType>(STy)->getNumElements(); 539 540 if (NumElements > 16 && GV->hasNUsesOrMore(16)) 541 return 0; // It's not worth it. 542 NewGlobals.reserve(NumElements); 543 544 uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType()); 545 unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType()); 546 for (unsigned i = 0, e = NumElements; i != e; ++i) { 547 Constant *In = getAggregateConstantElement(Init, 548 ConstantInt::get(Type::getInt32Ty(Init->getContext()), i)); 549 assert(In && "Couldn't get element of initializer?"); 550 551 GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false, 552 GlobalVariable::InternalLinkage, 553 In, GV->getName()+"."+Twine(i), 554 GV->isThreadLocal(), 555 GV->getType()->getAddressSpace()); 556 Globals.insert(GV, NGV); 557 NewGlobals.push_back(NGV); 558 559 // Calculate the known alignment of the field. If the original aggregate 560 // had 256 byte alignment for example, something might depend on that: 561 // propagate info to each field. 562 unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i); 563 if (NewAlign > EltAlign) 564 NGV->setAlignment(NewAlign); 565 } 566 } 567 568 if (NewGlobals.empty()) 569 return 0; 570 571 DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV); 572 573 Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext())); 574 575 // Loop over all of the uses of the global, replacing the constantexpr geps, 576 // with smaller constantexpr geps or direct references. 577 while (!GV->use_empty()) { 578 User *GEP = GV->use_back(); 579 assert(((isa<ConstantExpr>(GEP) && 580 cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)|| 581 isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!"); 582 583 // Ignore the 1th operand, which has to be zero or else the program is quite 584 // broken (undefined). Get the 2nd operand, which is the structure or array 585 // index. 586 unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue(); 587 if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access. 588 589 Value *NewPtr = NewGlobals[Val]; 590 591 // Form a shorter GEP if needed. 
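    // (Illustrative sketch, hypothetical IR: a use such as
    //      getelementptr @G, i32 0, i32 1, i32 0, i32 3
    //  becomes
    //      getelementptr @G.1, i32 0, i32 0, i32 3
    //  the element index at operand 2 picks one of the new split globals and
    //  the remaining indices are re-rooted under it behind a leading zero.)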
592 if (GEP->getNumOperands() > 3) { 593 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) { 594 SmallVector<Constant*, 8> Idxs; 595 Idxs.push_back(NullInt); 596 for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i) 597 Idxs.push_back(CE->getOperand(i)); 598 NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr), 599 &Idxs[0], Idxs.size()); 600 } else { 601 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP); 602 SmallVector<Value*, 8> Idxs; 603 Idxs.push_back(NullInt); 604 for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) 605 Idxs.push_back(GEPI->getOperand(i)); 606 NewPtr = GetElementPtrInst::Create(NewPtr, Idxs.begin(), Idxs.end(), 607 GEPI->getName()+"."+Twine(Val),GEPI); 608 } 609 } 610 GEP->replaceAllUsesWith(NewPtr); 611 612 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP)) 613 GEPI->eraseFromParent(); 614 else 615 cast<ConstantExpr>(GEP)->destroyConstant(); 616 } 617 618 // Delete the old global, now that it is dead. 619 Globals.erase(GV); 620 ++NumSRA; 621 622 // Loop over the new globals array deleting any globals that are obviously 623 // dead. This can arise due to scalarization of a structure or an array that 624 // has elements that are dead. 625 unsigned FirstGlobal = 0; 626 for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i) 627 if (NewGlobals[i]->use_empty()) { 628 Globals.erase(NewGlobals[i]); 629 if (FirstGlobal == i) ++FirstGlobal; 630 } 631 632 return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0; 633} 634 635/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified 636/// value will trap if the value is dynamically null. PHIs keeps track of any 637/// phi nodes we've seen to avoid reprocessing them. 638static bool AllUsesOfValueWillTrapIfNull(const Value *V, 639 SmallPtrSet<const PHINode*, 8> &PHIs) { 640 for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; 641 ++UI) { 642 const User *U = *UI; 643 644 if (isa<LoadInst>(U)) { 645 // Will trap. 646 } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) { 647 if (SI->getOperand(0) == V) { 648 //cerr << "NONTRAPPING USE: " << *U; 649 return false; // Storing the value. 650 } 651 } else if (const CallInst *CI = dyn_cast<CallInst>(U)) { 652 if (CI->getCalledValue() != V) { 653 //cerr << "NONTRAPPING USE: " << *U; 654 return false; // Not calling the ptr 655 } 656 } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) { 657 if (II->getCalledValue() != V) { 658 //cerr << "NONTRAPPING USE: " << *U; 659 return false; // Not calling the ptr 660 } 661 } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) { 662 if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false; 663 } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) { 664 if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false; 665 } else if (const PHINode *PN = dyn_cast<PHINode>(U)) { 666 // If we've already seen this phi node, ignore it, it has already been 667 // checked. 668 if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs)) 669 return false; 670 } else if (isa<ICmpInst>(U) && 671 isa<ConstantPointerNull>(UI->getOperand(1))) { 672 // Ignore icmp X, null 673 } else { 674 //cerr << "NONTRAPPING USE: " << *U; 675 return false; 676 } 677 } 678 return true; 679} 680 681/// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads 682/// from GV will trap if the loaded value is null. Note that this also permits 683/// comparisons of the loaded value against null, as a special case. 
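///
/// Illustrative example (hypothetical IR): given
///   %p = load i8** @G
///   %v = load i8* %p              ; would trap if %p were null
///   %c = icmp eq i8* %p, null     ; tolerated as the special case above
/// this returns true, because every use other than the comparison would
/// fault on a null pointer.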
684static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) { 685 for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end(); 686 UI != E; ++UI) { 687 const User *U = *UI; 688 689 if (const LoadInst *LI = dyn_cast<LoadInst>(U)) { 690 SmallPtrSet<const PHINode*, 8> PHIs; 691 if (!AllUsesOfValueWillTrapIfNull(LI, PHIs)) 692 return false; 693 } else if (isa<StoreInst>(U)) { 694 // Ignore stores to the global. 695 } else { 696 // We don't know or understand this user, bail out. 697 //cerr << "UNKNOWN USER OF GLOBAL!: " << *U; 698 return false; 699 } 700 } 701 return true; 702} 703 704static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) { 705 bool Changed = false; 706 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) { 707 Instruction *I = cast<Instruction>(*UI++); 708 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 709 LI->setOperand(0, NewV); 710 Changed = true; 711 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 712 if (SI->getOperand(1) == V) { 713 SI->setOperand(1, NewV); 714 Changed = true; 715 } 716 } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) { 717 CallSite CS(I); 718 if (CS.getCalledValue() == V) { 719 // Calling through the pointer! Turn into a direct call, but be careful 720 // that the pointer is not also being passed as an argument. 721 CS.setCalledFunction(NewV); 722 Changed = true; 723 bool PassedAsArg = false; 724 for (unsigned i = 0, e = CS.arg_size(); i != e; ++i) 725 if (CS.getArgument(i) == V) { 726 PassedAsArg = true; 727 CS.setArgument(i, NewV); 728 } 729 730 if (PassedAsArg) { 731 // Being passed as an argument also. Be careful to not invalidate UI! 732 UI = V->use_begin(); 733 } 734 } 735 } else if (CastInst *CI = dyn_cast<CastInst>(I)) { 736 Changed |= OptimizeAwayTrappingUsesOfValue(CI, 737 ConstantExpr::getCast(CI->getOpcode(), 738 NewV, CI->getType())); 739 if (CI->use_empty()) { 740 Changed = true; 741 CI->eraseFromParent(); 742 } 743 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { 744 // Should handle GEP here. 745 SmallVector<Constant*, 8> Idxs; 746 Idxs.reserve(GEPI->getNumOperands()-1); 747 for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end(); 748 i != e; ++i) 749 if (Constant *C = dyn_cast<Constant>(*i)) 750 Idxs.push_back(C); 751 else 752 break; 753 if (Idxs.size() == GEPI->getNumOperands()-1) 754 Changed |= OptimizeAwayTrappingUsesOfValue(GEPI, 755 ConstantExpr::getGetElementPtr(NewV, &Idxs[0], 756 Idxs.size())); 757 if (GEPI->use_empty()) { 758 Changed = true; 759 GEPI->eraseFromParent(); 760 } 761 } 762 } 763 764 return Changed; 765} 766 767 768/// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null 769/// value stored into it. If there are uses of the loaded value that would trap 770/// if the loaded value is dynamically null, then we know that they cannot be 771/// reachable with a null optimize away the load. 772static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV) { 773 bool Changed = false; 774 775 // Keep track of whether we are able to remove all the uses of the global 776 // other than the store that defines it. 777 bool AllNonStoreUsesGone = true; 778 779 // Replace all uses of loads with uses of uses of the stored value. 
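  // (Illustrative sketch, hypothetical IR: if @G is initialized to null and
  //  the single non-null value ever stored to it is @F, then
  //      %fp = load void()** @G
  //      call void %fp()
  //  can only execute after that store, so the call may use @F directly and
  //  the load can usually be deleted.)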
780 for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){ 781 User *GlobalUser = *GUI++; 782 if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) { 783 Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV); 784 // If we were able to delete all uses of the loads 785 if (LI->use_empty()) { 786 LI->eraseFromParent(); 787 Changed = true; 788 } else { 789 AllNonStoreUsesGone = false; 790 } 791 } else if (isa<StoreInst>(GlobalUser)) { 792 // Ignore the store that stores "LV" to the global. 793 assert(GlobalUser->getOperand(1) == GV && 794 "Must be storing *to* the global"); 795 } else { 796 AllNonStoreUsesGone = false; 797 798 // If we get here we could have other crazy uses that are transitively 799 // loaded. 800 assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) || 801 isa<ConstantExpr>(GlobalUser)) && "Only expect load and stores!"); 802 } 803 } 804 805 if (Changed) { 806 DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV); 807 ++NumGlobUses; 808 } 809 810 // If we nuked all of the loads, then none of the stores are needed either, 811 // nor is the global. 812 if (AllNonStoreUsesGone) { 813 DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n"); 814 CleanupConstantGlobalUsers(GV, 0); 815 if (GV->use_empty()) { 816 GV->eraseFromParent(); 817 ++NumDeleted; 818 } 819 Changed = true; 820 } 821 return Changed; 822} 823 824/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the 825/// instructions that are foldable. 826static void ConstantPropUsersOf(Value *V) { 827 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) 828 if (Instruction *I = dyn_cast<Instruction>(*UI++)) 829 if (Constant *NewC = ConstantFoldInstruction(I)) { 830 I->replaceAllUsesWith(NewC); 831 832 // Advance UI to the next non-I use to avoid invalidating it! 833 // Instructions could multiply use V. 834 while (UI != E && *UI == I) 835 ++UI; 836 I->eraseFromParent(); 837 } 838} 839 840/// OptimizeGlobalAddressOfMalloc - This function takes the specified global 841/// variable, and transforms the program as if it always contained the result of 842/// the specified malloc. Because it is always the result of the specified 843/// malloc, there is no reason to actually DO the malloc. Instead, turn the 844/// malloc into a global, and any loads of GV as uses of the new global. 845static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, 846 CallInst *CI, 847 const Type *AllocTy, 848 ConstantInt *NElements, 849 TargetData* TD) { 850 DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n'); 851 852 const Type *GlobalType; 853 if (NElements->getZExtValue() == 1) 854 GlobalType = AllocTy; 855 else 856 // If we have an array allocation, the global variable is of an array. 857 GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue()); 858 859 // Create the new global variable. The contents of the malloc'd memory is 860 // undefined, so initialize with an undef value. 861 GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(), 862 GlobalType, false, 863 GlobalValue::InternalLinkage, 864 UndefValue::get(GlobalType), 865 GV->getName()+".body", 866 GV, 867 GV->isThreadLocal()); 868 869 // If there are bitcast users of the malloc (which is typical, usually we have 870 // a malloc + bitcast) then replace them with uses of the new global. Update 871 // other users to use the global as well. 
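  // (Illustrative, hypothetical IR: for the common pattern
  //      %m = call i8* @malloc(i64 %sz)
  //      %t = bitcast i8* %m to %T*
  //  a bitcast that already has NewGV's type is replaced by NewGV outright,
  //  other bitcasts are retargeted at NewGV, and any remaining user of the
  //  raw malloc result goes through one shared bitcast of NewGV inserted
  //  before the call.)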
872 BitCastInst *TheBC = 0; 873 while (!CI->use_empty()) { 874 Instruction *User = cast<Instruction>(CI->use_back()); 875 if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) { 876 if (BCI->getType() == NewGV->getType()) { 877 BCI->replaceAllUsesWith(NewGV); 878 BCI->eraseFromParent(); 879 } else { 880 BCI->setOperand(0, NewGV); 881 } 882 } else { 883 if (TheBC == 0) 884 TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI); 885 User->replaceUsesOfWith(CI, TheBC); 886 } 887 } 888 889 Constant *RepValue = NewGV; 890 if (NewGV->getType() != GV->getType()->getElementType()) 891 RepValue = ConstantExpr::getBitCast(RepValue, 892 GV->getType()->getElementType()); 893 894 // If there is a comparison against null, we will insert a global bool to 895 // keep track of whether the global was initialized yet or not. 896 GlobalVariable *InitBool = 897 new GlobalVariable(Type::getInt1Ty(GV->getContext()), false, 898 GlobalValue::InternalLinkage, 899 ConstantInt::getFalse(GV->getContext()), 900 GV->getName()+".init", GV->isThreadLocal()); 901 bool InitBoolUsed = false; 902 903 // Loop over all uses of GV, processing them in turn. 904 while (!GV->use_empty()) { 905 if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) { 906 // The global is initialized when the store to it occurs. 907 new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, SI); 908 SI->eraseFromParent(); 909 continue; 910 } 911 912 LoadInst *LI = cast<LoadInst>(GV->use_back()); 913 while (!LI->use_empty()) { 914 Use &LoadUse = LI->use_begin().getUse(); 915 if (!isa<ICmpInst>(LoadUse.getUser())) { 916 LoadUse = RepValue; 917 continue; 918 } 919 920 ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser()); 921 // Replace the cmp X, 0 with a use of the bool value. 922 Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", ICI); 923 InitBoolUsed = true; 924 switch (ICI->getPredicate()) { 925 default: llvm_unreachable("Unknown ICmp Predicate!"); 926 case ICmpInst::ICMP_ULT: 927 case ICmpInst::ICMP_SLT: // X < null -> always false 928 LV = ConstantInt::getFalse(GV->getContext()); 929 break; 930 case ICmpInst::ICMP_ULE: 931 case ICmpInst::ICMP_SLE: 932 case ICmpInst::ICMP_EQ: 933 LV = BinaryOperator::CreateNot(LV, "notinit", ICI); 934 break; 935 case ICmpInst::ICMP_NE: 936 case ICmpInst::ICMP_UGE: 937 case ICmpInst::ICMP_SGE: 938 case ICmpInst::ICMP_UGT: 939 case ICmpInst::ICMP_SGT: 940 break; // no change. 941 } 942 ICI->replaceAllUsesWith(LV); 943 ICI->eraseFromParent(); 944 } 945 LI->eraseFromParent(); 946 } 947 948 // If the initialization boolean was used, insert it, otherwise delete it. 949 if (!InitBoolUsed) { 950 while (!InitBool->use_empty()) // Delete initializations 951 cast<StoreInst>(InitBool->use_back())->eraseFromParent(); 952 delete InitBool; 953 } else 954 GV->getParent()->getGlobalList().insert(GV, InitBool); 955 956 // Now the GV is dead, nuke it and the malloc.. 957 GV->eraseFromParent(); 958 CI->eraseFromParent(); 959 960 // To further other optimizations, loop over all users of NewGV and try to 961 // constant prop them. This will promote GEP instructions with constant 962 // indices into GEP constant-exprs, which will allow global-opt to hack on it. 963 ConstantPropUsersOf(NewGV); 964 if (RepValue != NewGV) 965 ConstantPropUsersOf(RepValue); 966 967 return NewGV; 968} 969 970/// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking 971/// to make sure that there are no complex uses of V. 
We permit simple things 972/// like dereferencing the pointer, but not storing through the address, unless 973/// it is to the specified global. 974static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V, 975 const GlobalVariable *GV, 976 SmallPtrSet<const PHINode*, 8> &PHIs) { 977 for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); 978 UI != E; ++UI) { 979 const Instruction *Inst = cast<Instruction>(*UI); 980 981 if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) { 982 continue; // Fine, ignore. 983 } 984 985 if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 986 if (SI->getOperand(0) == V && SI->getOperand(1) != GV) 987 return false; // Storing the pointer itself... bad. 988 continue; // Otherwise, storing through it, or storing into GV... fine. 989 } 990 991 // Must index into the array and into the struct. 992 if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) { 993 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs)) 994 return false; 995 continue; 996 } 997 998 if (const PHINode *PN = dyn_cast<PHINode>(Inst)) { 999 // PHIs are ok if all uses are ok. Don't infinitely recurse through PHI 1000 // cycles. 1001 if (PHIs.insert(PN)) 1002 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs)) 1003 return false; 1004 continue; 1005 } 1006 1007 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) { 1008 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs)) 1009 return false; 1010 continue; 1011 } 1012 1013 return false; 1014 } 1015 return true; 1016} 1017 1018/// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV 1019/// somewhere. Transform all uses of the allocation into loads from the 1020/// global and uses of the resultant pointer. Further, delete the store into 1021/// GV. This assumes that these value pass the 1022/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate. 1023static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc, 1024 GlobalVariable *GV) { 1025 while (!Alloc->use_empty()) { 1026 Instruction *U = cast<Instruction>(*Alloc->use_begin()); 1027 Instruction *InsertPt = U; 1028 if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 1029 // If this is the store of the allocation into the global, remove it. 1030 if (SI->getOperand(1) == GV) { 1031 SI->eraseFromParent(); 1032 continue; 1033 } 1034 } else if (PHINode *PN = dyn_cast<PHINode>(U)) { 1035 // Insert the load in the corresponding predecessor, not right before the 1036 // PHI. 1037 InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator(); 1038 } else if (isa<BitCastInst>(U)) { 1039 // Must be bitcast between the malloc and store to initialize the global. 1040 ReplaceUsesOfMallocWithGlobal(U, GV); 1041 U->eraseFromParent(); 1042 continue; 1043 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) { 1044 // If this is a "GEP bitcast" and the user is a store to the global, then 1045 // just process it as a bitcast. 1046 if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse()) 1047 if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back())) 1048 if (SI->getOperand(1) == GV) { 1049 // Must be bitcast GEP between the malloc and store to initialize 1050 // the global. 1051 ReplaceUsesOfMallocWithGlobal(GEPI, GV); 1052 GEPI->eraseFromParent(); 1053 continue; 1054 } 1055 } 1056 1057 // Insert a load from the global, and use it instead of the malloc. 
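    // (Illustrative sketch, hypothetical IR: after the store of the malloc
    //  result into @G is removed, a remaining use such as
    //      %x = getelementptr %T* %alloc, i32 0, i32 1
    //  becomes
    //      %G.val = load %T** @G
    //      %x = getelementptr %T* %G.val, i32 0, i32 1
    //  i.e. the allocation is re-read out of the global it was stored into.)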
1058 Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt); 1059 U->replaceUsesOfWith(Alloc, NL); 1060 } 1061} 1062 1063/// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi 1064/// of a load) are simple enough to perform heap SRA on. This permits GEP's 1065/// that index through the array and struct field, icmps of null, and PHIs. 1066static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V, 1067 SmallPtrSet<const PHINode*, 32> &LoadUsingPHIs, 1068 SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) { 1069 // We permit two users of the load: setcc comparing against the null 1070 // pointer, and a getelementptr of a specific form. 1071 for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; 1072 ++UI) { 1073 const Instruction *User = cast<Instruction>(*UI); 1074 1075 // Comparison against null is ok. 1076 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) { 1077 if (!isa<ConstantPointerNull>(ICI->getOperand(1))) 1078 return false; 1079 continue; 1080 } 1081 1082 // getelementptr is also ok, but only a simple form. 1083 if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) { 1084 // Must index into the array and into the struct. 1085 if (GEPI->getNumOperands() < 3) 1086 return false; 1087 1088 // Otherwise the GEP is ok. 1089 continue; 1090 } 1091 1092 if (const PHINode *PN = dyn_cast<PHINode>(User)) { 1093 if (!LoadUsingPHIsPerLoad.insert(PN)) 1094 // This means some phi nodes are dependent on each other. 1095 // Avoid infinite looping! 1096 return false; 1097 if (!LoadUsingPHIs.insert(PN)) 1098 // If we have already analyzed this PHI, then it is safe. 1099 continue; 1100 1101 // Make sure all uses of the PHI are simple enough to transform. 1102 if (!LoadUsesSimpleEnoughForHeapSRA(PN, 1103 LoadUsingPHIs, LoadUsingPHIsPerLoad)) 1104 return false; 1105 1106 continue; 1107 } 1108 1109 // Otherwise we don't know what this is, not ok. 1110 return false; 1111 } 1112 1113 return true; 1114} 1115 1116 1117/// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from 1118/// GV are simple enough to perform HeapSRA, return true. 1119static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV, 1120 Instruction *StoredVal) { 1121 SmallPtrSet<const PHINode*, 32> LoadUsingPHIs; 1122 SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad; 1123 for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end(); 1124 UI != E; ++UI) 1125 if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) { 1126 if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs, 1127 LoadUsingPHIsPerLoad)) 1128 return false; 1129 LoadUsingPHIsPerLoad.clear(); 1130 } 1131 1132 // If we reach here, we know that all uses of the loads and transitive uses 1133 // (through PHI nodes) are simple enough to transform. However, we don't know 1134 // that all inputs the to the PHI nodes are in the same equivalence sets. 1135 // Check to verify that all operands of the PHIs are either PHIS that can be 1136 // transformed, loads from GV, or MI itself. 1137 for (SmallPtrSet<const PHINode*, 32>::const_iterator I = LoadUsingPHIs.begin() 1138 , E = LoadUsingPHIs.end(); I != E; ++I) { 1139 const PHINode *PN = *I; 1140 for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) { 1141 Value *InVal = PN->getIncomingValue(op); 1142 1143 // PHI of the stored value itself is ok. 1144 if (InVal == StoredVal) continue; 1145 1146 if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) { 1147 // One of the PHIs in our set is (optimistically) ok. 
1148 if (LoadUsingPHIs.count(InPN)) 1149 continue; 1150 return false; 1151 } 1152 1153 // Load from GV is ok. 1154 if (const LoadInst *LI = dyn_cast<LoadInst>(InVal)) 1155 if (LI->getOperand(0) == GV) 1156 continue; 1157 1158 // UNDEF? NULL? 1159 1160 // Anything else is rejected. 1161 return false; 1162 } 1163 } 1164 1165 return true; 1166} 1167 1168static Value *GetHeapSROAValue(Value *V, unsigned FieldNo, 1169 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 1170 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { 1171 std::vector<Value*> &FieldVals = InsertedScalarizedValues[V]; 1172 1173 if (FieldNo >= FieldVals.size()) 1174 FieldVals.resize(FieldNo+1); 1175 1176 // If we already have this value, just reuse the previously scalarized 1177 // version. 1178 if (Value *FieldVal = FieldVals[FieldNo]) 1179 return FieldVal; 1180 1181 // Depending on what instruction this is, we have several cases. 1182 Value *Result; 1183 if (LoadInst *LI = dyn_cast<LoadInst>(V)) { 1184 // This is a scalarized version of the load from the global. Just create 1185 // a new Load of the scalarized global. 1186 Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo, 1187 InsertedScalarizedValues, 1188 PHIsToRewrite), 1189 LI->getName()+".f"+Twine(FieldNo), LI); 1190 } else if (PHINode *PN = dyn_cast<PHINode>(V)) { 1191 // PN's type is pointer to struct. Make a new PHI of pointer to struct 1192 // field. 1193 const StructType *ST = 1194 cast<StructType>(cast<PointerType>(PN->getType())->getElementType()); 1195 1196 PHINode *NewPN = 1197 PHINode::Create(PointerType::getUnqual(ST->getElementType(FieldNo)), 1198 PN->getNumIncomingValues(), 1199 PN->getName()+".f"+Twine(FieldNo), PN); 1200 Result = NewPN; 1201 PHIsToRewrite.push_back(std::make_pair(PN, FieldNo)); 1202 } else { 1203 llvm_unreachable("Unknown usable value"); 1204 Result = 0; 1205 } 1206 1207 return FieldVals[FieldNo] = Result; 1208} 1209 1210/// RewriteHeapSROALoadUser - Given a load instruction and a value derived from 1211/// the load, rewrite the derived value to use the HeapSRoA'd load. 1212static void RewriteHeapSROALoadUser(Instruction *LoadUser, 1213 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 1214 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { 1215 // If this is a comparison against null, handle it. 1216 if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) { 1217 assert(isa<ConstantPointerNull>(SCI->getOperand(1))); 1218 // If we have a setcc of the loaded pointer, we can use a setcc of any 1219 // field. 1220 Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0, 1221 InsertedScalarizedValues, PHIsToRewrite); 1222 1223 Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr, 1224 Constant::getNullValue(NPtr->getType()), 1225 SCI->getName()); 1226 SCI->replaceAllUsesWith(New); 1227 SCI->eraseFromParent(); 1228 return; 1229 } 1230 1231 // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...' 1232 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) { 1233 assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2)) 1234 && "Unexpected GEPI!"); 1235 1236 // Load the pointer for this field. 1237 unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue(); 1238 Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo, 1239 InsertedScalarizedValues, PHIsToRewrite); 1240 1241 // Create the new GEP idx vector. 
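    // (Illustrative, hypothetical IR: "getelementptr %p, %i, i32 2, %j"
    //  becomes "getelementptr %p.f2, %i, %j": operand 2 selects the
    //  per-field pointer and is dropped from the rewritten index list.)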
1242 SmallVector<Value*, 8> GEPIdx; 1243 GEPIdx.push_back(GEPI->getOperand(1)); 1244 GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end()); 1245 1246 Value *NGEPI = GetElementPtrInst::Create(NewPtr, 1247 GEPIdx.begin(), GEPIdx.end(), 1248 GEPI->getName(), GEPI); 1249 GEPI->replaceAllUsesWith(NGEPI); 1250 GEPI->eraseFromParent(); 1251 return; 1252 } 1253 1254 // Recursively transform the users of PHI nodes. This will lazily create the 1255 // PHIs that are needed for individual elements. Keep track of what PHIs we 1256 // see in InsertedScalarizedValues so that we don't get infinite loops (very 1257 // antisocial). If the PHI is already in InsertedScalarizedValues, it has 1258 // already been seen first by another load, so its uses have already been 1259 // processed. 1260 PHINode *PN = cast<PHINode>(LoadUser); 1261 bool Inserted; 1262 DenseMap<Value*, std::vector<Value*> >::iterator InsertPos; 1263 tie(InsertPos, Inserted) = 1264 InsertedScalarizedValues.insert(std::make_pair(PN, std::vector<Value*>())); 1265 if (!Inserted) return; 1266 1267 // If this is the first time we've seen this PHI, recursively process all 1268 // users. 1269 for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) { 1270 Instruction *User = cast<Instruction>(*UI++); 1271 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite); 1272 } 1273} 1274 1275/// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global. Ptr 1276/// is a value loaded from the global. Eliminate all uses of Ptr, making them 1277/// use FieldGlobals instead. All uses of loaded values satisfy 1278/// AllGlobalLoadUsesSimpleEnoughForHeapSRA. 1279static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load, 1280 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 1281 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { 1282 for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end(); 1283 UI != E; ) { 1284 Instruction *User = cast<Instruction>(*UI++); 1285 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite); 1286 } 1287 1288 if (Load->use_empty()) { 1289 Load->eraseFromParent(); 1290 InsertedScalarizedValues.erase(Load); 1291 } 1292} 1293 1294/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break 1295/// it up into multiple allocations of arrays of the fields. 1296static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI, 1297 Value* NElems, TargetData *TD) { 1298 DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n'); 1299 const Type* MAT = getMallocAllocatedType(CI); 1300 const StructType *STy = cast<StructType>(MAT); 1301 1302 // There is guaranteed to be at least one use of the malloc (storing 1303 // it into GV). If there are other uses, change them to be uses of 1304 // the global to simplify later code. This also deletes the store 1305 // into GV. 1306 ReplaceUsesOfMallocWithGlobal(CI, GV); 1307 1308 // Okay, at this point, there are no users of the malloc. Insert N 1309 // new mallocs at the same place as CI, and N globals. 
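  // (Illustrative sketch, hypothetical types: for
  //      @G = internal global %pair* null       ; %pair = type { i32, double }
  //      store %pair* <malloc of N x %pair>, %pair** @G
  //  the result is one global and one malloc per field,
  //      @G.f0 = internal global i32* null      ; malloc of N x i32
  //      @G.f1 = internal global double* null   ; malloc of N x double
  //  with each new allocation stored into its global at the old call site.)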
1310 std::vector<Value*> FieldGlobals; 1311 std::vector<Value*> FieldMallocs; 1312 1313 for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){ 1314 const Type *FieldTy = STy->getElementType(FieldNo); 1315 const PointerType *PFieldTy = PointerType::getUnqual(FieldTy); 1316 1317 GlobalVariable *NGV = 1318 new GlobalVariable(*GV->getParent(), 1319 PFieldTy, false, GlobalValue::InternalLinkage, 1320 Constant::getNullValue(PFieldTy), 1321 GV->getName() + ".f" + Twine(FieldNo), GV, 1322 GV->isThreadLocal()); 1323 FieldGlobals.push_back(NGV); 1324 1325 unsigned TypeSize = TD->getTypeAllocSize(FieldTy); 1326 if (const StructType *ST = dyn_cast<StructType>(FieldTy)) 1327 TypeSize = TD->getStructLayout(ST)->getSizeInBytes(); 1328 const Type *IntPtrTy = TD->getIntPtrType(CI->getContext()); 1329 Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy, 1330 ConstantInt::get(IntPtrTy, TypeSize), 1331 NElems, 0, 1332 CI->getName() + ".f" + Twine(FieldNo)); 1333 FieldMallocs.push_back(NMI); 1334 new StoreInst(NMI, NGV, CI); 1335 } 1336 1337 // The tricky aspect of this transformation is handling the case when malloc 1338 // fails. In the original code, malloc failing would set the result pointer 1339 // of malloc to null. In this case, some mallocs could succeed and others 1340 // could fail. As such, we emit code that looks like this: 1341 // F0 = malloc(field0) 1342 // F1 = malloc(field1) 1343 // F2 = malloc(field2) 1344 // if (F0 == 0 || F1 == 0 || F2 == 0) { 1345 // if (F0) { free(F0); F0 = 0; } 1346 // if (F1) { free(F1); F1 = 0; } 1347 // if (F2) { free(F2); F2 = 0; } 1348 // } 1349 // The malloc can also fail if its argument is too large. 1350 Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0); 1351 Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0), 1352 ConstantZero, "isneg"); 1353 for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) { 1354 Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i], 1355 Constant::getNullValue(FieldMallocs[i]->getType()), 1356 "isnull"); 1357 RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI); 1358 } 1359 1360 // Split the basic block at the old malloc. 1361 BasicBlock *OrigBB = CI->getParent(); 1362 BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont"); 1363 1364 // Create the block to check the first condition. Put all these blocks at the 1365 // end of the function as they are unlikely to be executed. 1366 BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(), 1367 "malloc_ret_null", 1368 OrigBB->getParent()); 1369 1370 // Remove the uncond branch from OrigBB to ContBB, turning it into a cond 1371 // branch on RunningOr. 1372 OrigBB->getTerminator()->eraseFromParent(); 1373 BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB); 1374 1375 // Within the NullPtrBlock, we need to emit a comparison and branch for each 1376 // pointer, because some may be null while others are not. 
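  // (Illustrative control flow, block names taken from the code above:
  //    malloc_ret_null:  %v = load @G.fi ; br (%v != null), free_it, next
  //    free_it:          call free(%v) ; store null, @G.fi ; br next
  //    next:             ...repeat for the next field, then br malloc_cont)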
1377 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { 1378 Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock); 1379 Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal, 1380 Constant::getNullValue(GVVal->getType()), 1381 "tmp"); 1382 BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it", 1383 OrigBB->getParent()); 1384 BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next", 1385 OrigBB->getParent()); 1386 Instruction *BI = BranchInst::Create(FreeBlock, NextBlock, 1387 Cmp, NullPtrBlock); 1388 1389 // Fill in FreeBlock. 1390 CallInst::CreateFree(GVVal, BI); 1391 new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i], 1392 FreeBlock); 1393 BranchInst::Create(NextBlock, FreeBlock); 1394 1395 NullPtrBlock = NextBlock; 1396 } 1397 1398 BranchInst::Create(ContBB, NullPtrBlock); 1399 1400 // CI is no longer needed, remove it. 1401 CI->eraseFromParent(); 1402 1403 /// InsertedScalarizedLoads - As we process loads, if we can't immediately 1404 /// update all uses of the load, keep track of what scalarized loads are 1405 /// inserted for a given load. 1406 DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues; 1407 InsertedScalarizedValues[GV] = FieldGlobals; 1408 1409 std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite; 1410 1411 // Okay, the malloc site is completely handled. All of the uses of GV are now 1412 // loads, and all uses of those loads are simple. Rewrite them to use loads 1413 // of the per-field globals instead. 1414 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) { 1415 Instruction *User = cast<Instruction>(*UI++); 1416 1417 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 1418 RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite); 1419 continue; 1420 } 1421 1422 // Must be a store of null. 1423 StoreInst *SI = cast<StoreInst>(User); 1424 assert(isa<ConstantPointerNull>(SI->getOperand(0)) && 1425 "Unexpected heap-sra user!"); 1426 1427 // Insert a store of null into each global. 1428 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { 1429 const PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType()); 1430 Constant *Null = Constant::getNullValue(PT->getElementType()); 1431 new StoreInst(Null, FieldGlobals[i], SI); 1432 } 1433 // Erase the original store. 1434 SI->eraseFromParent(); 1435 } 1436 1437 // While we have PHIs that are interesting to rewrite, do it. 1438 while (!PHIsToRewrite.empty()) { 1439 PHINode *PN = PHIsToRewrite.back().first; 1440 unsigned FieldNo = PHIsToRewrite.back().second; 1441 PHIsToRewrite.pop_back(); 1442 PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]); 1443 assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi"); 1444 1445 // Add all the incoming values. This can materialize more phis. 1446 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 1447 Value *InVal = PN->getIncomingValue(i); 1448 InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues, 1449 PHIsToRewrite); 1450 FieldPN->addIncoming(InVal, PN->getIncomingBlock(i)); 1451 } 1452 } 1453 1454 // Drop all inter-phi links and any loads that made it this far. 
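  // (Clarifying note: the scalarized PHIs and the now-dead loads can still
  //  reference one another, so their operands are dropped in this first pass
  //  and the instructions themselves are erased in the second pass below;
  //  erasing a value that still has users is not allowed.)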
1455 for (DenseMap<Value*, std::vector<Value*> >::iterator 1456 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); 1457 I != E; ++I) { 1458 if (PHINode *PN = dyn_cast<PHINode>(I->first)) 1459 PN->dropAllReferences(); 1460 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) 1461 LI->dropAllReferences(); 1462 } 1463 1464 // Delete all the phis and loads now that inter-references are dead. 1465 for (DenseMap<Value*, std::vector<Value*> >::iterator 1466 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); 1467 I != E; ++I) { 1468 if (PHINode *PN = dyn_cast<PHINode>(I->first)) 1469 PN->eraseFromParent(); 1470 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) 1471 LI->eraseFromParent(); 1472 } 1473 1474 // The old global is now dead, remove it. 1475 GV->eraseFromParent(); 1476 1477 ++NumHeapSRA; 1478 return cast<GlobalVariable>(FieldGlobals[0]); 1479} 1480 1481/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a 1482/// pointer global variable with a single value stored it that is a malloc or 1483/// cast of malloc. 1484static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, 1485 CallInst *CI, 1486 const Type *AllocTy, 1487 Module::global_iterator &GVI, 1488 TargetData *TD) { 1489 if (!TD) 1490 return false; 1491 1492 // If this is a malloc of an abstract type, don't touch it. 1493 if (!AllocTy->isSized()) 1494 return false; 1495 1496 // We can't optimize this global unless all uses of it are *known* to be 1497 // of the malloc value, not of the null initializer value (consider a use 1498 // that compares the global's value against zero to see if the malloc has 1499 // been reached). To do this, we check to see if all uses of the global 1500 // would trap if the global were null: this proves that they must all 1501 // happen after the malloc. 1502 if (!AllUsesOfLoadedValueWillTrapIfNull(GV)) 1503 return false; 1504 1505 // We can't optimize this if the malloc itself is used in a complex way, 1506 // for example, being stored into multiple globals. This allows the 1507 // malloc to be stored into the specified global, loaded setcc'd, and 1508 // GEP'd. These are all things we could transform to using the global 1509 // for. 1510 SmallPtrSet<const PHINode*, 8> PHIs; 1511 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs)) 1512 return false; 1513 1514 // If we have a global that is only initialized with a fixed size malloc, 1515 // transform the program to use global memory instead of malloc'd memory. 1516 // This eliminates dynamic allocation, avoids an indirection accessing the 1517 // data, and exposes the resultant global to further GlobalOpt. 1518 // We cannot optimize the malloc if we cannot determine malloc array size. 1519 Value *NElems = getMallocArraySize(CI, TD, true); 1520 if (!NElems) 1521 return false; 1522 1523 if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems)) 1524 // Restrict this transformation to only working on small allocations 1525 // (2048 bytes currently), as we don't want to introduce a 16M global or 1526 // something. 1527 if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) { 1528 GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD); 1529 return true; 1530 } 1531 1532 // If the allocation is an array of structures, consider transforming this 1533 // into multiple malloc'd arrays, one for each field. This is basically 1534 // SRoA for malloc'd memory. 
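  // Illustrative sketch of the transformation performed below (the struct, the
  // sizes and the names are invented for the example):
  //   %pair = type { double, i32 }
  //   @G = internal global %pair* null
  //   ...store malloc(%pair, %n) into @G...
  // becomes one global and one malloc per field,
  //   @G.f0 = internal global double* null    ; backed by malloc(8 * %n)
  //   @G.f1 = internal global i32* null       ; backed by malloc(4 * %n)
  // with loads and GEPs of @G rewritten to use the per-field globals.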
1535
1536 // If this is an allocation of a fixed size array of structs, analyze as a
1537 // variable size array. malloc [100 x struct],1 -> malloc struct, 100
1538 if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
1539 if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
1540 AllocTy = AT->getElementType();
1541
1542 const StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
1543 if (!AllocSTy)
1544 return false;
1545
1546 // If the structure has an unreasonable number of fields, leave it
1547 // alone.
1548 if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
1549 AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {
1550
1551 // If this is a fixed size array, transform the Malloc to be an alloc of
1552 // structs. malloc [100 x struct],1 -> malloc struct, 100
1553 if (const ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
1554 const Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
1555 unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
1556 Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
1557 Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
1558 Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
1559 AllocSize, NumElements,
1560 0, CI->getName());
1561 Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
1562 CI->replaceAllUsesWith(Cast);
1563 CI->eraseFromParent();
1564 CI = dyn_cast<BitCastInst>(Malloc) ?
1565 extractMallocCallFromBitCast(Malloc) : cast<CallInst>(Malloc);
1566 }
1567
1568 GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true),TD);
1569 return true;
1570 }
1571
1572 return false;
1573}
1574
1575// OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge
1576// that only one value (besides its initializer) is ever stored to the global.
1577static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
1578 Module::global_iterator &GVI,
1579 TargetData *TD) {
1580 // Ignore no-op GEPs and bitcasts.
1581 StoredOnceVal = StoredOnceVal->stripPointerCasts();
1582
1583 // If we are dealing with a pointer global that is initialized to null and
1584 // only has one (non-null) value stored into it, then we can optimize any
1585 // users of the loaded value (often calls and loads) that would trap if the
1586 // value was null.
1587 if (GV->getInitializer()->getType()->isPointerTy() &&
1588 GV->getInitializer()->isNullValue()) {
1589 if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
1590 if (GV->getInitializer()->getType() != SOVC->getType())
1591 SOVC =
1592 ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
1593
1594 // Optimize away any trapping uses of the loaded value.
1595 if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC))
1596 return true;
1597 } else if (CallInst *CI = extractMallocCall(StoredOnceVal)) {
1598 const Type* MallocType = getMallocAllocatedType(CI);
1599 if (MallocType && TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
1600 GVI, TD))
1601 return true;
1602 }
1603 }
1604
1605 return false;
1606}
1607
1608/// TryToShrinkGlobalToBoolean - At this point, we have learned that the only
1609/// two values ever stored into GV are its initializer and OtherVal. See if we
1610/// can shrink the global into a boolean and select between the two values
1611/// whenever it is used. This exposes the values to other scalar optimizations.
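/// For example (an illustrative sketch, not drawn from a test case): a global
///   @G = internal global i32 0
/// whose only stores write either 0 (its initializer) or 42 becomes
///   @G.b = internal global i1 false
/// Stores of 42 become stores of "i1 true", stores of 0 become stores of
/// "i1 false", and each load becomes a load of the i1 followed by
///   select i1 %b, i32 42, i32 0
/// (or a simple zext when the two values are exactly 0 and 1).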
1612static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) { 1613 const Type *GVElType = GV->getType()->getElementType(); 1614 1615 // If GVElType is already i1, it is already shrunk. If the type of the GV is 1616 // an FP value, pointer or vector, don't do this optimization because a select 1617 // between them is very expensive and unlikely to lead to later 1618 // simplification. In these cases, we typically end up with "cond ? v1 : v2" 1619 // where v1 and v2 both require constant pool loads, a big loss. 1620 if (GVElType == Type::getInt1Ty(GV->getContext()) || 1621 GVElType->isFloatingPointTy() || 1622 GVElType->isPointerTy() || GVElType->isVectorTy()) 1623 return false; 1624 1625 // Walk the use list of the global seeing if all the uses are load or store. 1626 // If there is anything else, bail out. 1627 for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){ 1628 User *U = *I; 1629 if (!isa<LoadInst>(U) && !isa<StoreInst>(U)) 1630 return false; 1631 } 1632 1633 DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV); 1634 1635 // Create the new global, initializing it to false. 1636 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()), 1637 false, 1638 GlobalValue::InternalLinkage, 1639 ConstantInt::getFalse(GV->getContext()), 1640 GV->getName()+".b", 1641 GV->isThreadLocal()); 1642 GV->getParent()->getGlobalList().insert(GV, NewGV); 1643 1644 Constant *InitVal = GV->getInitializer(); 1645 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) && 1646 "No reason to shrink to bool!"); 1647 1648 // If initialized to zero and storing one into the global, we can use a cast 1649 // instead of a select to synthesize the desired value. 1650 bool IsOneZero = false; 1651 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) 1652 IsOneZero = InitVal->isNullValue() && CI->isOne(); 1653 1654 while (!GV->use_empty()) { 1655 Instruction *UI = cast<Instruction>(GV->use_back()); 1656 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { 1657 // Change the store into a boolean store. 1658 bool StoringOther = SI->getOperand(0) == OtherVal; 1659 // Only do this if we weren't storing a loaded value. 1660 Value *StoreVal; 1661 if (StoringOther || SI->getOperand(0) == InitVal) 1662 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()), 1663 StoringOther); 1664 else { 1665 // Otherwise, we are storing a previously loaded copy. To do this, 1666 // change the copy from copying the original value to just copying the 1667 // bool. 1668 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0)); 1669 1670 // If we've already replaced the input, StoredVal will be a cast or 1671 // select instruction. If not, it will be a load of the original 1672 // global. 1673 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) { 1674 assert(LI->getOperand(0) == GV && "Not a copy!"); 1675 // Insert a new load, to preserve the saved value. 1676 StoreVal = new LoadInst(NewGV, LI->getName()+".b", LI); 1677 } else { 1678 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) && 1679 "This is not a form that we understand!"); 1680 StoreVal = StoredVal->getOperand(0); 1681 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!"); 1682 } 1683 } 1684 new StoreInst(StoreVal, NewGV, SI); 1685 } else { 1686 // Change the load into a load of bool then a select. 
1687 LoadInst *LI = cast<LoadInst>(UI);
1688 LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", LI);
1689 Value *NSI;
1690 if (IsOneZero)
1691 NSI = new ZExtInst(NLI, LI->getType(), "", LI);
1692 else
1693 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
1694 NSI->takeName(LI);
1695 LI->replaceAllUsesWith(NSI);
1696 }
1697 UI->eraseFromParent();
1698 }
1699
1700 GV->eraseFromParent();
1701 return true;
1702}
1703
1704
1705/// ProcessGlobal - Analyze the specified global variable and optimize
1706/// it if possible. If we make a change, return true.
1707bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
1708 Module::global_iterator &GVI) {
1709 if (!GV->hasLocalLinkage())
1710 return false;
1711
1712 // Do more involved optimizations if the global is internal.
1713 GV->removeDeadConstantUsers();
1714
1715 if (GV->use_empty()) {
1716 DEBUG(dbgs() << "GLOBAL DEAD: " << *GV);
1717 GV->eraseFromParent();
1718 ++NumDeleted;
1719 return true;
1720 }
1721
1722 SmallPtrSet<const PHINode*, 16> PHIUsers;
1723 GlobalStatus GS;
1724
1725 if (AnalyzeGlobal(GV, GS, PHIUsers))
1726 return false;
1727
1728 if (!GS.isCompared && !GV->hasUnnamedAddr()) {
1729 GV->setUnnamedAddr(true);
1730 NumUnnamed++;
1731 }
1732
1733 if (GV->isConstant() || !GV->hasInitializer())
1734 return false;
1735
1736 return ProcessInternalGlobal(GV, GVI, PHIUsers, GS);
1737}
1738
1739/// ProcessInternalGlobal - Analyze the specified global variable and optimize
1740/// it if possible. If we make a change, return true.
1741bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
1742 Module::global_iterator &GVI,
1743 const SmallPtrSet<const PHINode*, 16> &PHIUsers,
1744 const GlobalStatus &GS) {
1745 // If this is a first class global and has only one accessing function
1746 // and this function is main (which we know is not recursive), we can make
1747 // this global a local variable: we replace the global with a local alloca
1748 // in this function.
1749 //
1750 // NOTE: It doesn't make sense to promote non single-value types since we
1751 // are just replacing static memory with stack memory.
1752 //
1753 // If the global is in a different address space, don't bring it to the stack.
1754 if (!GS.HasMultipleAccessingFunctions &&
1755 GS.AccessingFunction && !GS.HasNonInstructionUser &&
1756 GV->getType()->getElementType()->isSingleValueType() &&
1757 GS.AccessingFunction->getName() == "main" &&
1758 GS.AccessingFunction->hasExternalLinkage() &&
1759 GV->getType()->getAddressSpace() == 0) {
1760 DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
1761 Instruction& FirstI = const_cast<Instruction&>(*GS.AccessingFunction
1762 ->getEntryBlock().begin());
1763 const Type* ElemTy = GV->getType()->getElementType();
1764 // FIXME: Pass Global's alignment when globals have alignment
1765 AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
1766 if (!isa<UndefValue>(GV->getInitializer()))
1767 new StoreInst(GV->getInitializer(), Alloca, &FirstI);
1768
1769 GV->replaceAllUsesWith(Alloca);
1770 GV->eraseFromParent();
1771 ++NumLocalized;
1772 return true;
1773 }
1774
1775 // If the global is never loaded (but may be stored to), it is dead.
1776 // Delete it now.
1777 if (!GS.isLoaded) {
1778 DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV);
1779
1780 // Delete any stores we can find to the global. We may not be able to
1781 // make it completely dead though.
1782 bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer());
1783
1784 // If the global is dead now, delete it.
1785 if (GV->use_empty()) { 1786 GV->eraseFromParent(); 1787 ++NumDeleted; 1788 Changed = true; 1789 } 1790 return Changed; 1791 1792 } else if (GS.StoredType <= GlobalStatus::isInitializerStored) { 1793 DEBUG(dbgs() << "MARKING CONSTANT: " << *GV); 1794 GV->setConstant(true); 1795 1796 // Clean up any obviously simplifiable users now. 1797 CleanupConstantGlobalUsers(GV, GV->getInitializer()); 1798 1799 // If the global is dead now, just nuke it. 1800 if (GV->use_empty()) { 1801 DEBUG(dbgs() << " *** Marking constant allowed us to simplify " 1802 << "all users and delete global!\n"); 1803 GV->eraseFromParent(); 1804 ++NumDeleted; 1805 } 1806 1807 ++NumMarked; 1808 return true; 1809 } else if (!GV->getInitializer()->getType()->isSingleValueType()) { 1810 if (TargetData *TD = getAnalysisIfAvailable<TargetData>()) 1811 if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) { 1812 GVI = FirstNewGV; // Don't skip the newly produced globals! 1813 return true; 1814 } 1815 } else if (GS.StoredType == GlobalStatus::isStoredOnce) { 1816 // If the initial value for the global was an undef value, and if only 1817 // one other value was stored into it, we can just change the 1818 // initializer to be the stored value, then delete all stores to the 1819 // global. This allows us to mark it constant. 1820 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) 1821 if (isa<UndefValue>(GV->getInitializer())) { 1822 // Change the initial value here. 1823 GV->setInitializer(SOVConstant); 1824 1825 // Clean up any obviously simplifiable users now. 1826 CleanupConstantGlobalUsers(GV, GV->getInitializer()); 1827 1828 if (GV->use_empty()) { 1829 DEBUG(dbgs() << " *** Substituting initializer allowed us to " 1830 << "simplify all users and delete global!\n"); 1831 GV->eraseFromParent(); 1832 ++NumDeleted; 1833 } else { 1834 GVI = GV; 1835 } 1836 ++NumSubstitute; 1837 return true; 1838 } 1839 1840 // Try to optimize globals based on the knowledge that only one value 1841 // (besides its initializer) is ever stored to the global. 1842 if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GVI, 1843 getAnalysisIfAvailable<TargetData>())) 1844 return true; 1845 1846 // Otherwise, if the global was not a boolean, we can shrink it to be a 1847 // boolean. 1848 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) 1849 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) { 1850 ++NumShrunkToBool; 1851 return true; 1852 } 1853 } 1854 1855 return false; 1856} 1857 1858/// ChangeCalleesToFastCall - Walk all of the direct calls of the specified 1859/// function, changing them to FastCC. 1860static void ChangeCalleesToFastCall(Function *F) { 1861 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){ 1862 CallSite User(cast<Instruction>(*UI)); 1863 User.setCallingConv(CallingConv::Fast); 1864 } 1865} 1866 1867static AttrListPtr StripNest(const AttrListPtr &Attrs) { 1868 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { 1869 if ((Attrs.getSlot(i).Attrs & Attribute::Nest) == 0) 1870 continue; 1871 1872 // There can be only one. 
1873 return Attrs.removeAttr(Attrs.getSlot(i).Index, Attribute::Nest); 1874 } 1875 1876 return Attrs; 1877} 1878 1879static void RemoveNestAttribute(Function *F) { 1880 F->setAttributes(StripNest(F->getAttributes())); 1881 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){ 1882 CallSite User(cast<Instruction>(*UI)); 1883 User.setAttributes(StripNest(User.getAttributes())); 1884 } 1885} 1886 1887bool GlobalOpt::OptimizeFunctions(Module &M) { 1888 bool Changed = false; 1889 // Optimize functions. 1890 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) { 1891 Function *F = FI++; 1892 // Functions without names cannot be referenced outside this module. 1893 if (!F->hasName() && !F->isDeclaration()) 1894 F->setLinkage(GlobalValue::InternalLinkage); 1895 F->removeDeadConstantUsers(); 1896 if (F->use_empty() && (F->hasLocalLinkage() || F->hasLinkOnceLinkage())) { 1897 F->eraseFromParent(); 1898 Changed = true; 1899 ++NumFnDeleted; 1900 } else if (F->hasLocalLinkage()) { 1901 if (F->getCallingConv() == CallingConv::C && !F->isVarArg() && 1902 !F->hasAddressTaken()) { 1903 // If this function has C calling conventions, is not a varargs 1904 // function, and is only called directly, promote it to use the Fast 1905 // calling convention. 1906 F->setCallingConv(CallingConv::Fast); 1907 ChangeCalleesToFastCall(F); 1908 ++NumFastCallFns; 1909 Changed = true; 1910 } 1911 1912 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) && 1913 !F->hasAddressTaken()) { 1914 // The function is not used by a trampoline intrinsic, so it is safe 1915 // to remove the 'nest' attribute. 1916 RemoveNestAttribute(F); 1917 ++NumNestRemoved; 1918 Changed = true; 1919 } 1920 } 1921 } 1922 return Changed; 1923} 1924 1925bool GlobalOpt::OptimizeGlobalVars(Module &M) { 1926 bool Changed = false; 1927 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end(); 1928 GVI != E; ) { 1929 GlobalVariable *GV = GVI++; 1930 // Global variables without names cannot be referenced outside this module. 1931 if (!GV->hasName() && !GV->isDeclaration()) 1932 GV->setLinkage(GlobalValue::InternalLinkage); 1933 // Simplify the initializer. 1934 if (GV->hasInitializer()) 1935 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) { 1936 TargetData *TD = getAnalysisIfAvailable<TargetData>(); 1937 Constant *New = ConstantFoldConstantExpression(CE, TD); 1938 if (New && New != CE) 1939 GV->setInitializer(New); 1940 } 1941 1942 Changed |= ProcessGlobal(GV, GVI); 1943 } 1944 return Changed; 1945} 1946 1947/// FindGlobalCtors - Find the llvm.global_ctors list, verifying that all 1948/// initializers have an init priority of 65535. 1949GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) { 1950 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); 1951 if (GV == 0) return 0; 1952 1953 // Verify that the initializer is simple enough for us to handle. We are 1954 // only allowed to optimize the initializer if it is unique. 1955 if (!GV->hasUniqueInitializer()) return 0; 1956 1957 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer()); 1958 if (!CA) return 0; 1959 1960 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) { 1961 ConstantStruct *CS = dyn_cast<ConstantStruct>(*i); 1962 if (!CS) return 0; 1963 1964 if (isa<ConstantPointerNull>(CS->getOperand(1))) 1965 continue; 1966 1967 // Must have a function or null ptr. 1968 if (!isa<Function>(CS->getOperand(1))) 1969 return 0; 1970 1971 // Init priority must be standard. 
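  // For reference, an illustrative (not exhaustive) shape of the list we are
  // willing to handle:
  //   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
  //     [{ i32, void ()* } { i32 65535, void ()* @some_ctor }]
  // Any entry with a priority other than 65535 makes us give up.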
1972 ConstantInt *CI = cast<ConstantInt>(CS->getOperand(0)); 1973 if (CI->getZExtValue() != 65535) 1974 return 0; 1975 } 1976 1977 return GV; 1978} 1979 1980/// ParseGlobalCtors - Given a llvm.global_ctors list that we can understand, 1981/// return a list of the functions and null terminator as a vector. 1982static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) { 1983 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer()); 1984 std::vector<Function*> Result; 1985 Result.reserve(CA->getNumOperands()); 1986 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) { 1987 ConstantStruct *CS = cast<ConstantStruct>(*i); 1988 Result.push_back(dyn_cast<Function>(CS->getOperand(1))); 1989 } 1990 return Result; 1991} 1992 1993/// InstallGlobalCtors - Given a specified llvm.global_ctors list, install the 1994/// specified array, returning the new global to use. 1995static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL, 1996 const std::vector<Function*> &Ctors) { 1997 // If we made a change, reassemble the initializer list. 1998 std::vector<Constant*> CSVals; 1999 CSVals.push_back(ConstantInt::get(Type::getInt32Ty(GCL->getContext()),65535)); 2000 CSVals.push_back(0); 2001 2002 // Create the new init list. 2003 std::vector<Constant*> CAList; 2004 for (unsigned i = 0, e = Ctors.size(); i != e; ++i) { 2005 if (Ctors[i]) { 2006 CSVals[1] = Ctors[i]; 2007 } else { 2008 const Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()), 2009 false); 2010 const PointerType *PFTy = PointerType::getUnqual(FTy); 2011 CSVals[1] = Constant::getNullValue(PFTy); 2012 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 2013 2147483647); 2014 } 2015 CAList.push_back(ConstantStruct::get(GCL->getContext(), CSVals, false)); 2016 } 2017 2018 // Create the array initializer. 2019 const Type *StructTy = 2020 cast<ArrayType>(GCL->getType()->getElementType())->getElementType(); 2021 Constant *CA = ConstantArray::get(ArrayType::get(StructTy, 2022 CAList.size()), CAList); 2023 2024 // If we didn't change the number of elements, don't create a new GV. 2025 if (CA->getType() == GCL->getInitializer()->getType()) { 2026 GCL->setInitializer(CA); 2027 return GCL; 2028 } 2029 2030 // Create the new global and insert it next to the existing list. 2031 GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(), 2032 GCL->getLinkage(), CA, "", 2033 GCL->isThreadLocal()); 2034 GCL->getParent()->getGlobalList().insert(GCL, NGV); 2035 NGV->takeName(GCL); 2036 2037 // Nuke the old list, replacing any uses with the new one. 2038 if (!GCL->use_empty()) { 2039 Constant *V = NGV; 2040 if (V->getType() != GCL->getType()) 2041 V = ConstantExpr::getBitCast(V, GCL->getType()); 2042 GCL->replaceAllUsesWith(V); 2043 } 2044 GCL->eraseFromParent(); 2045 2046 if (Ctors.size()) 2047 return NGV; 2048 else 2049 return 0; 2050} 2051 2052 2053static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues, Value *V) { 2054 if (Constant *CV = dyn_cast<Constant>(V)) return CV; 2055 Constant *R = ComputedValues[V]; 2056 assert(R && "Reference to an uncomputed value!"); 2057 return R; 2058} 2059 2060static inline bool 2061isSimpleEnoughValueToCommit(Constant *C, 2062 SmallPtrSet<Constant*, 8> &SimpleConstants); 2063 2064 2065/// isSimpleEnoughValueToCommit - Return true if the specified constant can be 2066/// handled by the code generator. 
We don't want to generate something like: 2067/// void *X = &X/42; 2068/// because the code generator doesn't have a relocation that can handle that. 2069/// 2070/// This function should be called if C was not found (but just got inserted) 2071/// in SimpleConstants to avoid having to rescan the same constants all the 2072/// time. 2073static bool isSimpleEnoughValueToCommitHelper(Constant *C, 2074 SmallPtrSet<Constant*, 8> &SimpleConstants) { 2075 // Simple integer, undef, constant aggregate zero, global addresses, etc are 2076 // all supported. 2077 if (C->getNumOperands() == 0 || isa<BlockAddress>(C) || 2078 isa<GlobalValue>(C)) 2079 return true; 2080 2081 // Aggregate values are safe if all their elements are. 2082 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) || 2083 isa<ConstantVector>(C)) { 2084 for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) { 2085 Constant *Op = cast<Constant>(C->getOperand(i)); 2086 if (!isSimpleEnoughValueToCommit(Op, SimpleConstants)) 2087 return false; 2088 } 2089 return true; 2090 } 2091 2092 // We don't know exactly what relocations are allowed in constant expressions, 2093 // so we allow &global+constantoffset, which is safe and uniformly supported 2094 // across targets. 2095 ConstantExpr *CE = cast<ConstantExpr>(C); 2096 switch (CE->getOpcode()) { 2097 case Instruction::BitCast: 2098 case Instruction::IntToPtr: 2099 case Instruction::PtrToInt: 2100 // These casts are always fine if the casted value is. 2101 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants); 2102 2103 // GEP is fine if it is simple + constant offset. 2104 case Instruction::GetElementPtr: 2105 for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i) 2106 if (!isa<ConstantInt>(CE->getOperand(i))) 2107 return false; 2108 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants); 2109 2110 case Instruction::Add: 2111 // We allow simple+cst. 2112 if (!isa<ConstantInt>(CE->getOperand(1))) 2113 return false; 2114 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants); 2115 } 2116 return false; 2117} 2118 2119static inline bool 2120isSimpleEnoughValueToCommit(Constant *C, 2121 SmallPtrSet<Constant*, 8> &SimpleConstants) { 2122 // If we already checked this constant, we win. 2123 if (!SimpleConstants.insert(C)) return true; 2124 // Check the constant. 2125 return isSimpleEnoughValueToCommitHelper(C, SimpleConstants); 2126} 2127 2128 2129/// isSimpleEnoughPointerToCommit - Return true if this constant is simple 2130/// enough for us to understand. In particular, if it is a cast to anything 2131/// other than from one pointer type to another pointer type, we punt. 2132/// We basically just support direct accesses to globals and GEP's of 2133/// globals. This should be kept up to date with CommitValueTo. 2134static bool isSimpleEnoughPointerToCommit(Constant *C) { 2135 // Conservatively, avoid aggregate types. This is because we don't 2136 // want to worry about them partially overlapping other stores. 2137 if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType()) 2138 return false; 2139 2140 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) 2141 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 2142 // external globals. 2143 return GV->hasUniqueInitializer(); 2144 2145 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 2146 // Handle a constantexpr gep. 
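    // E.g. (illustrative sketches of pointers we are willing to commit to):
    //   getelementptr inbounds ({ i32, [4 x i8] }* @G, i32 0, i32 1, i32 2)
    //   bitcast (i32* @G to i8*)
    // where @G has a unique initializer; anything fancier is rejected.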
2147 if (CE->getOpcode() == Instruction::GetElementPtr && 2148 isa<GlobalVariable>(CE->getOperand(0)) && 2149 cast<GEPOperator>(CE)->isInBounds()) { 2150 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2151 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 2152 // external globals. 2153 if (!GV->hasUniqueInitializer()) 2154 return false; 2155 2156 // The first index must be zero. 2157 ConstantInt *CI = dyn_cast<ConstantInt>(*llvm::next(CE->op_begin())); 2158 if (!CI || !CI->isZero()) return false; 2159 2160 // The remaining indices must be compile-time known integers within the 2161 // notional bounds of the corresponding static array types. 2162 if (!CE->isGEPWithNoNotionalOverIndexing()) 2163 return false; 2164 2165 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); 2166 2167 // A constantexpr bitcast from a pointer to another pointer is a no-op, 2168 // and we know how to evaluate it by moving the bitcast from the pointer 2169 // operand to the value operand. 2170 } else if (CE->getOpcode() == Instruction::BitCast && 2171 isa<GlobalVariable>(CE->getOperand(0))) { 2172 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 2173 // external globals. 2174 return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer(); 2175 } 2176 } 2177 2178 return false; 2179} 2180 2181/// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global 2182/// initializer. This returns 'Init' modified to reflect 'Val' stored into it. 2183/// At this point, the GEP operands of Addr [0, OpNo) have been stepped into. 2184static Constant *EvaluateStoreInto(Constant *Init, Constant *Val, 2185 ConstantExpr *Addr, unsigned OpNo) { 2186 // Base case of the recursion. 2187 if (OpNo == Addr->getNumOperands()) { 2188 assert(Val->getType() == Init->getType() && "Type mismatch!"); 2189 return Val; 2190 } 2191 2192 std::vector<Constant*> Elts; 2193 if (const StructType *STy = dyn_cast<StructType>(Init->getType())) { 2194 2195 // Break up the constant into its elements. 2196 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) { 2197 for (User::op_iterator i = CS->op_begin(), e = CS->op_end(); i != e; ++i) 2198 Elts.push_back(cast<Constant>(*i)); 2199 } else if (isa<ConstantAggregateZero>(Init)) { 2200 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 2201 Elts.push_back(Constant::getNullValue(STy->getElementType(i))); 2202 } else if (isa<UndefValue>(Init)) { 2203 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 2204 Elts.push_back(UndefValue::get(STy->getElementType(i))); 2205 } else { 2206 llvm_unreachable("This code is out of sync with " 2207 " ConstantFoldLoadThroughGEPConstantExpr"); 2208 } 2209 2210 // Replace the element that we are supposed to. 2211 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo)); 2212 unsigned Idx = CU->getZExtValue(); 2213 assert(Idx < STy->getNumElements() && "Struct index out of range!"); 2214 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1); 2215 2216 // Return the modified struct. 
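    // For instance (illustrative): storing i32 7 through
    //   getelementptr (@G, i32 0, i32 1, i32 2)
    // into an initializer { i32 0, [4 x i32] zeroinitializer } rebuilds it as
    //   { i32 0, [4 x i32] [i32 0, i32 0, i32 7, i32 0] }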
2217 return ConstantStruct::get(Init->getContext(), &Elts[0], Elts.size(), 2218 STy->isPacked()); 2219 } else { 2220 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo)); 2221 const SequentialType *InitTy = cast<SequentialType>(Init->getType()); 2222 2223 uint64_t NumElts; 2224 if (const ArrayType *ATy = dyn_cast<ArrayType>(InitTy)) 2225 NumElts = ATy->getNumElements(); 2226 else 2227 NumElts = cast<VectorType>(InitTy)->getNumElements(); 2228 2229 2230 // Break up the array into elements. 2231 if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) { 2232 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) 2233 Elts.push_back(cast<Constant>(*i)); 2234 } else if (ConstantVector *CV = dyn_cast<ConstantVector>(Init)) { 2235 for (User::op_iterator i = CV->op_begin(), e = CV->op_end(); i != e; ++i) 2236 Elts.push_back(cast<Constant>(*i)); 2237 } else if (isa<ConstantAggregateZero>(Init)) { 2238 Elts.assign(NumElts, Constant::getNullValue(InitTy->getElementType())); 2239 } else { 2240 assert(isa<UndefValue>(Init) && "This code is out of sync with " 2241 " ConstantFoldLoadThroughGEPConstantExpr"); 2242 Elts.assign(NumElts, UndefValue::get(InitTy->getElementType())); 2243 } 2244 2245 assert(CI->getZExtValue() < NumElts); 2246 Elts[CI->getZExtValue()] = 2247 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1); 2248 2249 if (Init->getType()->isArrayTy()) 2250 return ConstantArray::get(cast<ArrayType>(InitTy), Elts); 2251 return ConstantVector::get(Elts); 2252 } 2253} 2254 2255/// CommitValueTo - We have decided that Addr (which satisfies the predicate 2256/// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen. 2257static void CommitValueTo(Constant *Val, Constant *Addr) { 2258 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) { 2259 assert(GV->hasInitializer()); 2260 GV->setInitializer(Val); 2261 return; 2262 } 2263 2264 ConstantExpr *CE = cast<ConstantExpr>(Addr); 2265 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2266 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2)); 2267} 2268 2269/// ComputeLoadResult - Return the value that would be computed by a load from 2270/// P after the stores reflected by 'memory' have been performed. If we can't 2271/// decide, return null. 2272static Constant *ComputeLoadResult(Constant *P, 2273 const DenseMap<Constant*, Constant*> &Memory) { 2274 // If this memory location has been recently stored, use the stored value: it 2275 // is the most up-to-date. 2276 DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P); 2277 if (I != Memory.end()) return I->second; 2278 2279 // Access it. 2280 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) { 2281 if (GV->hasDefinitiveInitializer()) 2282 return GV->getInitializer(); 2283 return 0; 2284 } 2285 2286 // Handle a constantexpr getelementptr. 2287 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P)) 2288 if (CE->getOpcode() == Instruction::GetElementPtr && 2289 isa<GlobalVariable>(CE->getOperand(0))) { 2290 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2291 if (GV->hasDefinitiveInitializer()) 2292 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); 2293 } 2294 2295 return 0; // don't know how to evaluate. 2296} 2297 2298/// EvaluateFunction - Evaluate a call to function F, returning true if 2299/// successful, false if we can't evaluate it. ActualArgs contains the formal 2300/// arguments for the function. 
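/// (As an illustrative example of something this can fully evaluate: a ctor
/// that only does "g_x = 6 * 7;" through straight-line code and direct calls
/// to other defined, non-recursive functions; the resulting store of 42 is
/// recorded in MutatedMemory and later committed by the caller.)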
2301static bool EvaluateFunction(Function *F, Constant *&RetVal, 2302 const SmallVectorImpl<Constant*> &ActualArgs, 2303 std::vector<Function*> &CallStack, 2304 DenseMap<Constant*, Constant*> &MutatedMemory, 2305 std::vector<GlobalVariable*> &AllocaTmps, 2306 SmallPtrSet<Constant*, 8> &SimpleConstants, 2307 const TargetData *TD) { 2308 // Check to see if this function is already executing (recursion). If so, 2309 // bail out. TODO: we might want to accept limited recursion. 2310 if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end()) 2311 return false; 2312 2313 CallStack.push_back(F); 2314 2315 /// Values - As we compute SSA register values, we store their contents here. 2316 DenseMap<Value*, Constant*> Values; 2317 2318 // Initialize arguments to the incoming values specified. 2319 unsigned ArgNo = 0; 2320 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E; 2321 ++AI, ++ArgNo) 2322 Values[AI] = ActualArgs[ArgNo]; 2323 2324 /// ExecutedBlocks - We only handle non-looping, non-recursive code. As such, 2325 /// we can only evaluate any one basic block at most once. This set keeps 2326 /// track of what we have executed so we can detect recursive cases etc. 2327 SmallPtrSet<BasicBlock*, 32> ExecutedBlocks; 2328 2329 // CurInst - The current instruction we're evaluating. 2330 BasicBlock::iterator CurInst = F->begin()->begin(); 2331 2332 // This is the main evaluation loop. 2333 while (1) { 2334 Constant *InstResult = 0; 2335 2336 if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) { 2337 if (SI->isVolatile()) return false; // no volatile accesses. 2338 Constant *Ptr = getVal(Values, SI->getOperand(1)); 2339 if (!isSimpleEnoughPointerToCommit(Ptr)) 2340 // If this is too complex for us to commit, reject it. 2341 return false; 2342 2343 Constant *Val = getVal(Values, SI->getOperand(0)); 2344 2345 // If this might be too difficult for the backend to handle (e.g. the addr 2346 // of one global variable divided by another) then we can't commit it. 2347 if (!isSimpleEnoughValueToCommit(Val, SimpleConstants)) 2348 return false; 2349 2350 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) 2351 if (CE->getOpcode() == Instruction::BitCast) { 2352 // If we're evaluating a store through a bitcast, then we need 2353 // to pull the bitcast off the pointer type and push it onto the 2354 // stored value. 2355 Ptr = CE->getOperand(0); 2356 2357 const Type *NewTy=cast<PointerType>(Ptr->getType())->getElementType(); 2358 2359 // In order to push the bitcast onto the stored value, a bitcast 2360 // from NewTy to Val's type must be legal. If it's not, we can try 2361 // introspecting NewTy to find a legal conversion. 2362 while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) { 2363 // If NewTy is a struct, we can convert the pointer to the struct 2364 // into a pointer to its first member. 2365 // FIXME: This could be extended to support arrays as well. 2366 if (const StructType *STy = dyn_cast<StructType>(NewTy)) { 2367 NewTy = STy->getTypeAtIndex(0U); 2368 2369 const IntegerType *IdxTy =IntegerType::get(NewTy->getContext(), 32); 2370 Constant *IdxZero = ConstantInt::get(IdxTy, 0, false); 2371 Constant * const IdxList[] = {IdxZero, IdxZero}; 2372 2373 Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList, 2); 2374 2375 // If we can't improve the situation by introspecting NewTy, 2376 // we have to give up. 2377 } else { 2378 return 0; 2379 } 2380 } 2381 2382 // If we found compatible types, go ahead and push the bitcast 2383 // onto the stored value. 
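          // Illustrative sketch: committing a store of the (constant) value
          //   i8* bitcast (i32* @H to i8*)
          // through the pointer
          //   i8** bitcast (%S* @G to i8**)      ; %S = type { i32*, i32 }
          // ends up keying MutatedMemory on "getelementptr (%S* @G, i32 0, i32 0)"
          // with the stored value re-bitcast to i32*.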
2384 Val = ConstantExpr::getBitCast(Val, NewTy); 2385 } 2386 2387 MutatedMemory[Ptr] = Val; 2388 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) { 2389 InstResult = ConstantExpr::get(BO->getOpcode(), 2390 getVal(Values, BO->getOperand(0)), 2391 getVal(Values, BO->getOperand(1))); 2392 } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) { 2393 InstResult = ConstantExpr::getCompare(CI->getPredicate(), 2394 getVal(Values, CI->getOperand(0)), 2395 getVal(Values, CI->getOperand(1))); 2396 } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) { 2397 InstResult = ConstantExpr::getCast(CI->getOpcode(), 2398 getVal(Values, CI->getOperand(0)), 2399 CI->getType()); 2400 } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) { 2401 InstResult = ConstantExpr::getSelect(getVal(Values, SI->getOperand(0)), 2402 getVal(Values, SI->getOperand(1)), 2403 getVal(Values, SI->getOperand(2))); 2404 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) { 2405 Constant *P = getVal(Values, GEP->getOperand(0)); 2406 SmallVector<Constant*, 8> GEPOps; 2407 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); 2408 i != e; ++i) 2409 GEPOps.push_back(getVal(Values, *i)); 2410 InstResult = cast<GEPOperator>(GEP)->isInBounds() ? 2411 ConstantExpr::getInBoundsGetElementPtr(P, &GEPOps[0], GEPOps.size()) : 2412 ConstantExpr::getGetElementPtr(P, &GEPOps[0], GEPOps.size()); 2413 } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) { 2414 if (LI->isVolatile()) return false; // no volatile accesses. 2415 InstResult = ComputeLoadResult(getVal(Values, LI->getOperand(0)), 2416 MutatedMemory); 2417 if (InstResult == 0) return false; // Could not evaluate load. 2418 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) { 2419 if (AI->isArrayAllocation()) return false; // Cannot handle array allocs. 2420 const Type *Ty = AI->getType()->getElementType(); 2421 AllocaTmps.push_back(new GlobalVariable(Ty, false, 2422 GlobalValue::InternalLinkage, 2423 UndefValue::get(Ty), 2424 AI->getName())); 2425 InstResult = AllocaTmps.back(); 2426 } else if (CallInst *CI = dyn_cast<CallInst>(CurInst)) { 2427 2428 // Debug info can safely be ignored here. 2429 if (isa<DbgInfoIntrinsic>(CI)) { 2430 ++CurInst; 2431 continue; 2432 } 2433 2434 // Cannot handle inline asm. 2435 if (isa<InlineAsm>(CI->getCalledValue())) return false; 2436 2437 // Resolve function pointers. 2438 Function *Callee = dyn_cast<Function>(getVal(Values, 2439 CI->getCalledValue())); 2440 if (!Callee) return false; // Cannot resolve. 2441 2442 SmallVector<Constant*, 8> Formals; 2443 CallSite CS(CI); 2444 for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); 2445 i != e; ++i) 2446 Formals.push_back(getVal(Values, *i)); 2447 2448 if (Callee->isDeclaration()) { 2449 // If this is a function we can constant fold, do it. 2450 if (Constant *C = ConstantFoldCall(Callee, Formals.data(), 2451 Formals.size())) { 2452 InstResult = C; 2453 } else { 2454 return false; 2455 } 2456 } else { 2457 if (Callee->getFunctionType()->isVarArg()) 2458 return false; 2459 2460 Constant *RetVal; 2461 // Execute the call, if successful, use the return value. 
2462 if (!EvaluateFunction(Callee, RetVal, Formals, CallStack, 2463 MutatedMemory, AllocaTmps, SimpleConstants, TD)) 2464 return false; 2465 InstResult = RetVal; 2466 } 2467 } else if (isa<TerminatorInst>(CurInst)) { 2468 BasicBlock *NewBB = 0; 2469 if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) { 2470 if (BI->isUnconditional()) { 2471 NewBB = BI->getSuccessor(0); 2472 } else { 2473 ConstantInt *Cond = 2474 dyn_cast<ConstantInt>(getVal(Values, BI->getCondition())); 2475 if (!Cond) return false; // Cannot determine. 2476 2477 NewBB = BI->getSuccessor(!Cond->getZExtValue()); 2478 } 2479 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) { 2480 ConstantInt *Val = 2481 dyn_cast<ConstantInt>(getVal(Values, SI->getCondition())); 2482 if (!Val) return false; // Cannot determine. 2483 NewBB = SI->getSuccessor(SI->findCaseValue(Val)); 2484 } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) { 2485 Value *Val = getVal(Values, IBI->getAddress())->stripPointerCasts(); 2486 if (BlockAddress *BA = dyn_cast<BlockAddress>(Val)) 2487 NewBB = BA->getBasicBlock(); 2488 else 2489 return false; // Cannot determine. 2490 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(CurInst)) { 2491 if (RI->getNumOperands()) 2492 RetVal = getVal(Values, RI->getOperand(0)); 2493 2494 CallStack.pop_back(); // return from fn. 2495 return true; // We succeeded at evaluating this ctor! 2496 } else { 2497 // invoke, unwind, unreachable. 2498 return false; // Cannot handle this terminator. 2499 } 2500 2501 // Okay, we succeeded in evaluating this control flow. See if we have 2502 // executed the new block before. If so, we have a looping function, 2503 // which we cannot evaluate in reasonable time. 2504 if (!ExecutedBlocks.insert(NewBB)) 2505 return false; // looped! 2506 2507 // Okay, we have never been in this block before. Check to see if there 2508 // are any PHI nodes. If so, evaluate them with information about where 2509 // we came from. 2510 BasicBlock *OldBB = CurInst->getParent(); 2511 CurInst = NewBB->begin(); 2512 PHINode *PN; 2513 for (; (PN = dyn_cast<PHINode>(CurInst)); ++CurInst) 2514 Values[PN] = getVal(Values, PN->getIncomingValueForBlock(OldBB)); 2515 2516 // Do NOT increment CurInst. We know that the terminator had no value. 2517 continue; 2518 } else { 2519 // Did not know how to evaluate this! 2520 return false; 2521 } 2522 2523 if (!CurInst->use_empty()) { 2524 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult)) 2525 InstResult = ConstantFoldConstantExpression(CE, TD); 2526 2527 Values[CurInst] = InstResult; 2528 } 2529 2530 // Advance program counter. 2531 ++CurInst; 2532 } 2533} 2534 2535/// EvaluateStaticConstructor - Evaluate static constructors in the function, if 2536/// we can. Return true if we can, false otherwise. 2537static bool EvaluateStaticConstructor(Function *F, const TargetData *TD) { 2538 /// MutatedMemory - For each store we execute, we update this map. Loads 2539 /// check this to get the most up-to-date value. If evaluation is successful, 2540 /// this state is committed to the process. 2541 DenseMap<Constant*, Constant*> MutatedMemory; 2542 2543 /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable 2544 /// to represent its body. This vector is needed so we can delete the 2545 /// temporary globals when we are done. 2546 std::vector<GlobalVariable*> AllocaTmps; 2547 2548 /// CallStack - This is used to detect recursion. 
In pathological situations 2549 /// we could hit exponential behavior, but at least there is nothing 2550 /// unbounded. 2551 std::vector<Function*> CallStack; 2552 2553 /// SimpleConstants - These are constants we have checked and know to be 2554 /// simple enough to live in a static initializer of a global. 2555 SmallPtrSet<Constant*, 8> SimpleConstants; 2556 2557 // Call the function. 2558 Constant *RetValDummy; 2559 bool EvalSuccess = EvaluateFunction(F, RetValDummy, 2560 SmallVector<Constant*, 0>(), CallStack, 2561 MutatedMemory, AllocaTmps, 2562 SimpleConstants, TD); 2563 2564 if (EvalSuccess) { 2565 // We succeeded at evaluation: commit the result. 2566 DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" 2567 << F->getName() << "' to " << MutatedMemory.size() 2568 << " stores.\n"); 2569 for (DenseMap<Constant*, Constant*>::iterator I = MutatedMemory.begin(), 2570 E = MutatedMemory.end(); I != E; ++I) 2571 CommitValueTo(I->second, I->first); 2572 } 2573 2574 // At this point, we are done interpreting. If we created any 'alloca' 2575 // temporaries, release them now. 2576 while (!AllocaTmps.empty()) { 2577 GlobalVariable *Tmp = AllocaTmps.back(); 2578 AllocaTmps.pop_back(); 2579 2580 // If there are still users of the alloca, the program is doing something 2581 // silly, e.g. storing the address of the alloca somewhere and using it 2582 // later. Since this is undefined, we'll just make it be null. 2583 if (!Tmp->use_empty()) 2584 Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType())); 2585 delete Tmp; 2586 } 2587 2588 return EvalSuccess; 2589} 2590 2591 2592 2593/// OptimizeGlobalCtorsList - Simplify and evaluation global ctors if possible. 2594/// Return true if anything changed. 2595bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) { 2596 std::vector<Function*> Ctors = ParseGlobalCtors(GCL); 2597 bool MadeChange = false; 2598 if (Ctors.empty()) return false; 2599 2600 const TargetData *TD = getAnalysisIfAvailable<TargetData>(); 2601 // Loop over global ctors, optimizing them when we can. 2602 for (unsigned i = 0; i != Ctors.size(); ++i) { 2603 Function *F = Ctors[i]; 2604 // Found a null terminator in the middle of the list, prune off the rest of 2605 // the list. 2606 if (F == 0) { 2607 if (i != Ctors.size()-1) { 2608 Ctors.resize(i+1); 2609 MadeChange = true; 2610 } 2611 break; 2612 } 2613 2614 // We cannot simplify external ctor functions. 2615 if (F->empty()) continue; 2616 2617 // If we can evaluate the ctor at compile time, do. 2618 if (EvaluateStaticConstructor(F, TD)) { 2619 Ctors.erase(Ctors.begin()+i); 2620 MadeChange = true; 2621 --i; 2622 ++NumCtorsEvaluated; 2623 continue; 2624 } 2625 } 2626 2627 if (!MadeChange) return false; 2628 2629 GCL = InstallGlobalCtors(GCL, Ctors); 2630 return true; 2631} 2632 2633bool GlobalOpt::OptimizeGlobalAliases(Module &M) { 2634 bool Changed = false; 2635 2636 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); 2637 I != E;) { 2638 Module::alias_iterator J = I++; 2639 // Aliases without names cannot be referenced outside this module. 2640 if (!J->hasName() && !J->isDeclaration()) 2641 J->setLinkage(GlobalValue::InternalLinkage); 2642 // If the aliasee may change at link time, nothing can be done - bail out. 
2643 if (J->mayBeOverridden()) 2644 continue; 2645 2646 Constant *Aliasee = J->getAliasee(); 2647 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts()); 2648 Target->removeDeadConstantUsers(); 2649 bool hasOneUse = Target->hasOneUse() && Aliasee->hasOneUse(); 2650 2651 // Make all users of the alias use the aliasee instead. 2652 if (!J->use_empty()) { 2653 J->replaceAllUsesWith(Aliasee); 2654 ++NumAliasesResolved; 2655 Changed = true; 2656 } 2657 2658 // If the alias is externally visible, we may still be able to simplify it. 2659 if (!J->hasLocalLinkage()) { 2660 // If the aliasee has internal linkage, give it the name and linkage 2661 // of the alias, and delete the alias. This turns: 2662 // define internal ... @f(...) 2663 // @a = alias ... @f 2664 // into: 2665 // define ... @a(...) 2666 if (!Target->hasLocalLinkage()) 2667 continue; 2668 2669 // Do not perform the transform if multiple aliases potentially target the 2670 // aliasee. This check also ensures that it is safe to replace the section 2671 // and other attributes of the aliasee with those of the alias. 2672 if (!hasOneUse) 2673 continue; 2674 2675 // Give the aliasee the name, linkage and other attributes of the alias. 2676 Target->takeName(J); 2677 Target->setLinkage(J->getLinkage()); 2678 Target->GlobalValue::copyAttributesFrom(J); 2679 } 2680 2681 // Delete the alias. 2682 M.getAliasList().erase(J); 2683 ++NumAliasesRemoved; 2684 Changed = true; 2685 } 2686 2687 return Changed; 2688} 2689 2690static Function *FindCXAAtExit(Module &M) { 2691 Function *Fn = M.getFunction("__cxa_atexit"); 2692 2693 if (!Fn) 2694 return 0; 2695 2696 const FunctionType *FTy = Fn->getFunctionType(); 2697 2698 // Checking that the function has the right return type, the right number of 2699 // parameters and that they all have pointer types should be enough. 2700 if (!FTy->getReturnType()->isIntegerTy() || 2701 FTy->getNumParams() != 3 || 2702 !FTy->getParamType(0)->isPointerTy() || 2703 !FTy->getParamType(1)->isPointerTy() || 2704 !FTy->getParamType(2)->isPointerTy()) 2705 return 0; 2706 2707 return Fn; 2708} 2709 2710/// cxxDtorIsEmpty - Returns whether the given function is an empty C++ 2711/// destructor and can therefore be eliminated. 2712/// Note that we assume that other optimization passes have already simplified 2713/// the code so we only look for a function with a single basic block, where 2714/// the only allowed instructions are 'ret' or 'call' to empty C++ dtor. 2715static bool cxxDtorIsEmpty(const Function &Fn, 2716 SmallPtrSet<const Function *, 8> &CalledFunctions) { 2717 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and 2718 // nounwind, but that doesn't seem worth doing. 2719 if (Fn.isDeclaration()) 2720 return false; 2721 2722 if (++Fn.begin() != Fn.end()) 2723 return false; 2724 2725 const BasicBlock &EntryBlock = Fn.getEntryBlock(); 2726 for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end(); 2727 I != E; ++I) { 2728 if (const CallInst *CI = dyn_cast<CallInst>(I)) { 2729 // Ignore debug intrinsics. 2730 if (isa<DbgInfoIntrinsic>(CI)) 2731 continue; 2732 2733 const Function *CalledFn = CI->getCalledFunction(); 2734 2735 if (!CalledFn) 2736 return false; 2737 2738 SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions); 2739 2740 // Don't treat recursive functions as empty. 
2741 if (!NewCalledFunctions.insert(CalledFn)) 2742 return false; 2743 2744 if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions)) 2745 return false; 2746 } else if (isa<ReturnInst>(*I)) 2747 return true; 2748 else 2749 return false; 2750 } 2751 2752 return false; 2753} 2754 2755bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) { 2756 /// Itanium C++ ABI p3.3.5: 2757 /// 2758 /// After constructing a global (or local static) object, that will require 2759 /// destruction on exit, a termination function is registered as follows: 2760 /// 2761 /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d ); 2762 /// 2763 /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the 2764 /// call f(p) when DSO d is unloaded, before all such termination calls 2765 /// registered before this one. It returns zero if registration is 2766 /// successful, nonzero on failure. 2767 2768 // This pass will look for calls to __cxa_atexit where the function is trivial 2769 // and remove them. 2770 bool Changed = false; 2771 2772 for (Function::use_iterator I = CXAAtExitFn->use_begin(), 2773 E = CXAAtExitFn->use_end(); I != E;) { 2774 // We're only interested in calls. Theoretically, we could handle invoke 2775 // instructions as well, but neither llvm-gcc nor clang generate invokes 2776 // to __cxa_atexit. 2777 CallInst *CI = dyn_cast<CallInst>(*I++); 2778 if (!CI) 2779 continue; 2780 2781 Function *DtorFn = 2782 dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts()); 2783 if (!DtorFn) 2784 continue; 2785 2786 SmallPtrSet<const Function *, 8> CalledFunctions; 2787 if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions)) 2788 continue; 2789 2790 // Just remove the call. 2791 CI->replaceAllUsesWith(Constant::getNullValue(CI->getType())); 2792 CI->eraseFromParent(); 2793 2794 ++NumCXXDtorsRemoved; 2795 2796 Changed |= true; 2797 } 2798 2799 return Changed; 2800} 2801 2802bool GlobalOpt::runOnModule(Module &M) { 2803 bool Changed = false; 2804 2805 // Try to find the llvm.globalctors list. 2806 GlobalVariable *GlobalCtors = FindGlobalCtors(M); 2807 2808 Function *CXAAtExitFn = FindCXAAtExit(M); 2809 2810 bool LocalChange = true; 2811 while (LocalChange) { 2812 LocalChange = false; 2813 2814 // Delete functions that are trivially dead, ccc -> fastcc 2815 LocalChange |= OptimizeFunctions(M); 2816 2817 // Optimize global_ctors list. 2818 if (GlobalCtors) 2819 LocalChange |= OptimizeGlobalCtorsList(GlobalCtors); 2820 2821 // Optimize non-address-taken globals. 2822 LocalChange |= OptimizeGlobalVars(M); 2823 2824 // Resolve aliases, when possible. 2825 LocalChange |= OptimizeGlobalAliases(M); 2826 2827 // Try to remove trivial global destructors. 2828 if (CXAAtExitFn) 2829 LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn); 2830 2831 Changed |= LocalChange; 2832 } 2833 2834 // TODO: Move all global ctors functions to the end of the module for code 2835 // layout. 2836 2837 return Changed; 2838} 2839