GlobalOpt.cpp revision 9adc0abad3c3ed40a268ccbcee0c74cb9e1359fe
1//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This pass transforms simple global variables that never have their address 11// taken. If obviously true, it marks read/write globals as constant, deletes 12// variables only stored to, etc. 13// 14//===----------------------------------------------------------------------===// 15 16#define DEBUG_TYPE "globalopt" 17#include "llvm/Transforms/IPO.h" 18#include "llvm/CallingConv.h" 19#include "llvm/Constants.h" 20#include "llvm/DerivedTypes.h" 21#include "llvm/Instructions.h" 22#include "llvm/IntrinsicInst.h" 23#include "llvm/LLVMContext.h" 24#include "llvm/Module.h" 25#include "llvm/Pass.h" 26#include "llvm/Analysis/ConstantFolding.h" 27#include "llvm/Target/TargetData.h" 28#include "llvm/Support/CallSite.h" 29#include "llvm/Support/Compiler.h" 30#include "llvm/Support/Debug.h" 31#include "llvm/Support/ErrorHandling.h" 32#include "llvm/Support/GetElementPtrTypeIterator.h" 33#include "llvm/Support/MathExtras.h" 34#include "llvm/ADT/DenseMap.h" 35#include "llvm/ADT/SmallPtrSet.h" 36#include "llvm/ADT/SmallVector.h" 37#include "llvm/ADT/Statistic.h" 38#include "llvm/ADT/StringExtras.h" 39#include "llvm/ADT/STLExtras.h" 40#include <algorithm> 41using namespace llvm; 42 43STATISTIC(NumMarked , "Number of globals marked constant"); 44STATISTIC(NumSRA , "Number of aggregate globals broken into scalars"); 45STATISTIC(NumHeapSRA , "Number of heap objects SRA'd"); 46STATISTIC(NumSubstitute,"Number of globals with initializers stored into them"); 47STATISTIC(NumDeleted , "Number of globals deleted"); 48STATISTIC(NumFnDeleted , "Number of functions deleted"); 49STATISTIC(NumGlobUses , "Number of global uses devirtualized"); 50STATISTIC(NumLocalized , "Number of globals localized"); 51STATISTIC(NumShrunkToBool , "Number of global vars shrunk to booleans"); 52STATISTIC(NumFastCallFns , "Number of functions converted to fastcc"); 53STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated"); 54STATISTIC(NumNestRemoved , "Number of nest attributes removed"); 55STATISTIC(NumAliasesResolved, "Number of global aliases resolved"); 56STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated"); 57 58namespace { 59 struct VISIBILITY_HIDDEN GlobalOpt : public ModulePass { 60 virtual void getAnalysisUsage(AnalysisUsage &AU) const { 61 AU.addRequired<TargetData>(); 62 } 63 static char ID; // Pass identification, replacement for typeid 64 GlobalOpt() : ModulePass(&ID) {} 65 66 bool runOnModule(Module &M); 67 68 private: 69 GlobalVariable *FindGlobalCtors(Module &M); 70 bool OptimizeFunctions(Module &M); 71 bool OptimizeGlobalVars(Module &M); 72 bool OptimizeGlobalAliases(Module &M); 73 bool OptimizeGlobalCtorsList(GlobalVariable *&GCL); 74 bool ProcessInternalGlobal(GlobalVariable *GV,Module::global_iterator &GVI); 75 }; 76} 77 78char GlobalOpt::ID = 0; 79static RegisterPass<GlobalOpt> X("globalopt", "Global Variable Optimizer"); 80 81ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); } 82 83namespace { 84 85/// GlobalStatus - As we analyze each global, keep track of some information 86/// about it. If we find out that the address of the global is taken, none of 87/// this info will be accurate. 88struct VISIBILITY_HIDDEN GlobalStatus { 89 /// isLoaded - True if the global is ever loaded. If the global isn't ever 90 /// loaded it can be deleted. 91 bool isLoaded; 92 93 /// StoredType - Keep track of what stores to the global look like. 94 /// 95 enum StoredType { 96 /// NotStored - There is no store to this global. It can thus be marked 97 /// constant. 98 NotStored, 99 100 /// isInitializerStored - This global is stored to, but the only thing 101 /// stored is the constant it was initialized with. This is only tracked 102 /// for scalar globals. 103 isInitializerStored, 104 105 /// isStoredOnce - This global is stored to, but only its initializer and 106 /// one other value is ever stored to it. If this global isStoredOnce, we 107 /// track the value stored to it in StoredOnceValue below. This is only 108 /// tracked for scalar globals. 109 isStoredOnce, 110 111 /// isStored - This global is stored to by multiple values or something else 112 /// that we cannot track. 113 isStored 114 } StoredType; 115 116 /// StoredOnceValue - If only one value (besides the initializer constant) is 117 /// ever stored to this global, keep track of what value it is. 118 Value *StoredOnceValue; 119 120 /// AccessingFunction/HasMultipleAccessingFunctions - These start out 121 /// null/false. When the first accessing function is noticed, it is recorded. 122 /// When a second different accessing function is noticed, 123 /// HasMultipleAccessingFunctions is set to true. 124 Function *AccessingFunction; 125 bool HasMultipleAccessingFunctions; 126 127 /// HasNonInstructionUser - Set to true if this global has a user that is not 128 /// an instruction (e.g. a constant expr or GV initializer). 129 bool HasNonInstructionUser; 130 131 /// HasPHIUser - Set to true if this global has a user that is a PHI node. 132 bool HasPHIUser; 133 134 GlobalStatus() : isLoaded(false), StoredType(NotStored), StoredOnceValue(0), 135 AccessingFunction(0), HasMultipleAccessingFunctions(false), 136 HasNonInstructionUser(false), HasPHIUser(false) {} 137}; 138 139} 140 141// SafeToDestroyConstant - It is safe to destroy a constant iff it is only used 142// by constants itself. Note that constants cannot be cyclic, so this test is 143// pretty easy to implement recursively. 144// 145static bool SafeToDestroyConstant(Constant *C) { 146 if (isa<GlobalValue>(C)) return false; 147 148 for (Value::use_iterator UI = C->use_begin(), E = C->use_end(); UI != E; ++UI) 149 if (Constant *CU = dyn_cast<Constant>(*UI)) { 150 if (!SafeToDestroyConstant(CU)) return false; 151 } else 152 return false; 153 return true; 154} 155 156 157/// AnalyzeGlobal - Look at all uses of the global and fill in the GlobalStatus 158/// structure. If the global has its address taken, return true to indicate we 159/// can't do anything with it. 160/// 161static bool AnalyzeGlobal(Value *V, GlobalStatus &GS, 162 SmallPtrSet<PHINode*, 16> &PHIUsers) { 163 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI) 164 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(*UI)) { 165 GS.HasNonInstructionUser = true; 166 167 if (AnalyzeGlobal(CE, GS, PHIUsers)) return true; 168 169 } else if (Instruction *I = dyn_cast<Instruction>(*UI)) { 170 if (!GS.HasMultipleAccessingFunctions) { 171 Function *F = I->getParent()->getParent(); 172 if (GS.AccessingFunction == 0) 173 GS.AccessingFunction = F; 174 else if (GS.AccessingFunction != F) 175 GS.HasMultipleAccessingFunctions = true; 176 } 177 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 178 GS.isLoaded = true; 179 if (LI->isVolatile()) return true; // Don't hack on volatile loads. 180 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 181 // Don't allow a store OF the address, only stores TO the address. 182 if (SI->getOperand(0) == V) return true; 183 184 if (SI->isVolatile()) return true; // Don't hack on volatile stores. 185 186 // If this is a direct store to the global (i.e., the global is a scalar 187 // value, not an aggregate), keep more specific information about 188 // stores. 189 if (GS.StoredType != GlobalStatus::isStored) { 190 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(SI->getOperand(1))){ 191 Value *StoredVal = SI->getOperand(0); 192 if (StoredVal == GV->getInitializer()) { 193 if (GS.StoredType < GlobalStatus::isInitializerStored) 194 GS.StoredType = GlobalStatus::isInitializerStored; 195 } else if (isa<LoadInst>(StoredVal) && 196 cast<LoadInst>(StoredVal)->getOperand(0) == GV) { 197 // G = G 198 if (GS.StoredType < GlobalStatus::isInitializerStored) 199 GS.StoredType = GlobalStatus::isInitializerStored; 200 } else if (GS.StoredType < GlobalStatus::isStoredOnce) { 201 GS.StoredType = GlobalStatus::isStoredOnce; 202 GS.StoredOnceValue = StoredVal; 203 } else if (GS.StoredType == GlobalStatus::isStoredOnce && 204 GS.StoredOnceValue == StoredVal) { 205 // noop. 206 } else { 207 GS.StoredType = GlobalStatus::isStored; 208 } 209 } else { 210 GS.StoredType = GlobalStatus::isStored; 211 } 212 } 213 } else if (isa<GetElementPtrInst>(I)) { 214 if (AnalyzeGlobal(I, GS, PHIUsers)) return true; 215 } else if (isa<SelectInst>(I)) { 216 if (AnalyzeGlobal(I, GS, PHIUsers)) return true; 217 } else if (PHINode *PN = dyn_cast<PHINode>(I)) { 218 // PHI nodes we can check just like select or GEP instructions, but we 219 // have to be careful about infinite recursion. 220 if (PHIUsers.insert(PN)) // Not already visited. 221 if (AnalyzeGlobal(I, GS, PHIUsers)) return true; 222 GS.HasPHIUser = true; 223 } else if (isa<CmpInst>(I)) { 224 } else if (isa<MemTransferInst>(I)) { 225 if (I->getOperand(1) == V) 226 GS.StoredType = GlobalStatus::isStored; 227 if (I->getOperand(2) == V) 228 GS.isLoaded = true; 229 } else if (isa<MemSetInst>(I)) { 230 assert(I->getOperand(1) == V && "Memset only takes one pointer!"); 231 GS.StoredType = GlobalStatus::isStored; 232 } else { 233 return true; // Any other non-load instruction might take address! 234 } 235 } else if (Constant *C = dyn_cast<Constant>(*UI)) { 236 GS.HasNonInstructionUser = true; 237 // We might have a dead and dangling constant hanging off of here. 238 if (!SafeToDestroyConstant(C)) 239 return true; 240 } else { 241 GS.HasNonInstructionUser = true; 242 // Otherwise must be some other user. 243 return true; 244 } 245 246 return false; 247} 248 249static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx, 250 LLVMContext *Context) { 251 ConstantInt *CI = dyn_cast<ConstantInt>(Idx); 252 if (!CI) return 0; 253 unsigned IdxV = CI->getZExtValue(); 254 255 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Agg)) { 256 if (IdxV < CS->getNumOperands()) return CS->getOperand(IdxV); 257 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Agg)) { 258 if (IdxV < CA->getNumOperands()) return CA->getOperand(IdxV); 259 } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Agg)) { 260 if (IdxV < CP->getNumOperands()) return CP->getOperand(IdxV); 261 } else if (isa<ConstantAggregateZero>(Agg)) { 262 if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) { 263 if (IdxV < STy->getNumElements()) 264 return Context->getNullValue(STy->getElementType(IdxV)); 265 } else if (const SequentialType *STy = 266 dyn_cast<SequentialType>(Agg->getType())) { 267 return Context->getNullValue(STy->getElementType()); 268 } 269 } else if (isa<UndefValue>(Agg)) { 270 if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) { 271 if (IdxV < STy->getNumElements()) 272 return Context->getUndef(STy->getElementType(IdxV)); 273 } else if (const SequentialType *STy = 274 dyn_cast<SequentialType>(Agg->getType())) { 275 return Context->getUndef(STy->getElementType()); 276 } 277 } 278 return 0; 279} 280 281 282/// CleanupConstantGlobalUsers - We just marked GV constant. Loop over all 283/// users of the global, cleaning up the obvious ones. This is largely just a 284/// quick scan over the use list to clean up the easy and obvious cruft. This 285/// returns true if it made a change. 286static bool CleanupConstantGlobalUsers(Value *V, Constant *Init, 287 LLVMContext *Context) { 288 bool Changed = false; 289 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) { 290 User *U = *UI++; 291 292 if (LoadInst *LI = dyn_cast<LoadInst>(U)) { 293 if (Init) { 294 // Replace the load with the initializer. 295 LI->replaceAllUsesWith(Init); 296 LI->eraseFromParent(); 297 Changed = true; 298 } 299 } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 300 // Store must be unreachable or storing Init into the global. 301 SI->eraseFromParent(); 302 Changed = true; 303 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) { 304 if (CE->getOpcode() == Instruction::GetElementPtr) { 305 Constant *SubInit = 0; 306 if (Init) 307 SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE, Context); 308 Changed |= CleanupConstantGlobalUsers(CE, SubInit, Context); 309 } else if (CE->getOpcode() == Instruction::BitCast && 310 isa<PointerType>(CE->getType())) { 311 // Pointer cast, delete any stores and memsets to the global. 312 Changed |= CleanupConstantGlobalUsers(CE, 0, Context); 313 } 314 315 if (CE->use_empty()) { 316 CE->destroyConstant(); 317 Changed = true; 318 } 319 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { 320 // Do not transform "gepinst (gep constexpr (GV))" here, because forming 321 // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold 322 // and will invalidate our notion of what Init is. 323 Constant *SubInit = 0; 324 if (!isa<ConstantExpr>(GEP->getOperand(0))) { 325 ConstantExpr *CE = 326 dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, Context)); 327 if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr) 328 SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE, Context); 329 } 330 Changed |= CleanupConstantGlobalUsers(GEP, SubInit, Context); 331 332 if (GEP->use_empty()) { 333 GEP->eraseFromParent(); 334 Changed = true; 335 } 336 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv 337 if (MI->getRawDest() == V) { 338 MI->eraseFromParent(); 339 Changed = true; 340 } 341 342 } else if (Constant *C = dyn_cast<Constant>(U)) { 343 // If we have a chain of dead constantexprs or other things dangling from 344 // us, and if they are all dead, nuke them without remorse. 345 if (SafeToDestroyConstant(C)) { 346 C->destroyConstant(); 347 // This could have invalidated UI, start over from scratch. 348 CleanupConstantGlobalUsers(V, Init, Context); 349 return true; 350 } 351 } 352 } 353 return Changed; 354} 355 356/// isSafeSROAElementUse - Return true if the specified instruction is a safe 357/// user of a derived expression from a global that we want to SROA. 358static bool isSafeSROAElementUse(Value *V) { 359 // We might have a dead and dangling constant hanging off of here. 360 if (Constant *C = dyn_cast<Constant>(V)) 361 return SafeToDestroyConstant(C); 362 363 Instruction *I = dyn_cast<Instruction>(V); 364 if (!I) return false; 365 366 // Loads are ok. 367 if (isa<LoadInst>(I)) return true; 368 369 // Stores *to* the pointer are ok. 370 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 371 return SI->getOperand(0) != V; 372 373 // Otherwise, it must be a GEP. 374 GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I); 375 if (GEPI == 0) return false; 376 377 if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) || 378 !cast<Constant>(GEPI->getOperand(1))->isNullValue()) 379 return false; 380 381 for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end(); 382 I != E; ++I) 383 if (!isSafeSROAElementUse(*I)) 384 return false; 385 return true; 386} 387 388 389/// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value. 390/// Look at it and its uses and decide whether it is safe to SROA this global. 391/// 392static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) { 393 // The user of the global must be a GEP Inst or a ConstantExpr GEP. 394 if (!isa<GetElementPtrInst>(U) && 395 (!isa<ConstantExpr>(U) || 396 cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr)) 397 return false; 398 399 // Check to see if this ConstantExpr GEP is SRA'able. In particular, we 400 // don't like < 3 operand CE's, and we don't like non-constant integer 401 // indices. This enforces that all uses are 'gep GV, 0, C, ...' for some 402 // value of C. 403 if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) || 404 !cast<Constant>(U->getOperand(1))->isNullValue() || 405 !isa<ConstantInt>(U->getOperand(2))) 406 return false; 407 408 gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U); 409 ++GEPI; // Skip over the pointer index. 410 411 // If this is a use of an array allocation, do a bit more checking for sanity. 412 if (const ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) { 413 uint64_t NumElements = AT->getNumElements(); 414 ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2)); 415 416 // Check to make sure that index falls within the array. If not, 417 // something funny is going on, so we won't do the optimization. 418 // 419 if (Idx->getZExtValue() >= NumElements) 420 return false; 421 422 // We cannot scalar repl this level of the array unless any array 423 // sub-indices are in-range constants. In particular, consider: 424 // A[0][i]. We cannot know that the user isn't doing invalid things like 425 // allowing i to index an out-of-range subscript that accesses A[1]. 426 // 427 // Scalar replacing *just* the outer index of the array is probably not 428 // going to be a win anyway, so just give up. 429 for (++GEPI; // Skip array index. 430 GEPI != E && (isa<ArrayType>(*GEPI) || isa<VectorType>(*GEPI)); 431 ++GEPI) { 432 uint64_t NumElements; 433 if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI)) 434 NumElements = SubArrayTy->getNumElements(); 435 else 436 NumElements = cast<VectorType>(*GEPI)->getNumElements(); 437 438 ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand()); 439 if (!IdxVal || IdxVal->getZExtValue() >= NumElements) 440 return false; 441 } 442 } 443 444 for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I) 445 if (!isSafeSROAElementUse(*I)) 446 return false; 447 return true; 448} 449 450/// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it 451/// is safe for us to perform this transformation. 452/// 453static bool GlobalUsersSafeToSRA(GlobalValue *GV) { 454 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); 455 UI != E; ++UI) { 456 if (!IsUserOfGlobalSafeForSRA(*UI, GV)) 457 return false; 458 } 459 return true; 460} 461 462 463/// SRAGlobal - Perform scalar replacement of aggregates on the specified global 464/// variable. This opens the door for other optimizations by exposing the 465/// behavior of the program in a more fine-grained way. We have determined that 466/// this transformation is safe already. We return the first global variable we 467/// insert so that the caller can reprocess it. 468static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD, 469 LLVMContext *Context) { 470 // Make sure this global only has simple uses that we can SRA. 471 if (!GlobalUsersSafeToSRA(GV)) 472 return 0; 473 474 assert(GV->hasLocalLinkage() && !GV->isConstant()); 475 Constant *Init = GV->getInitializer(); 476 const Type *Ty = Init->getType(); 477 478 std::vector<GlobalVariable*> NewGlobals; 479 Module::GlobalListType &Globals = GV->getParent()->getGlobalList(); 480 481 // Get the alignment of the global, either explicit or target-specific. 482 unsigned StartAlignment = GV->getAlignment(); 483 if (StartAlignment == 0) 484 StartAlignment = TD.getABITypeAlignment(GV->getType()); 485 486 if (const StructType *STy = dyn_cast<StructType>(Ty)) { 487 NewGlobals.reserve(STy->getNumElements()); 488 const StructLayout &Layout = *TD.getStructLayout(STy); 489 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 490 Constant *In = getAggregateConstantElement(Init, 491 Context->getConstantInt(Type::Int32Ty, i), 492 Context); 493 assert(In && "Couldn't get element of initializer?"); 494 GlobalVariable *NGV = new GlobalVariable(*Context, 495 STy->getElementType(i), false, 496 GlobalVariable::InternalLinkage, 497 In, GV->getName()+"."+utostr(i), 498 GV->isThreadLocal(), 499 GV->getType()->getAddressSpace()); 500 Globals.insert(GV, NGV); 501 NewGlobals.push_back(NGV); 502 503 // Calculate the known alignment of the field. If the original aggregate 504 // had 256 byte alignment for example, something might depend on that: 505 // propagate info to each field. 506 uint64_t FieldOffset = Layout.getElementOffset(i); 507 unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset); 508 if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i))) 509 NGV->setAlignment(NewAlign); 510 } 511 } else if (const SequentialType *STy = dyn_cast<SequentialType>(Ty)) { 512 unsigned NumElements = 0; 513 if (const ArrayType *ATy = dyn_cast<ArrayType>(STy)) 514 NumElements = ATy->getNumElements(); 515 else 516 NumElements = cast<VectorType>(STy)->getNumElements(); 517 518 if (NumElements > 16 && GV->hasNUsesOrMore(16)) 519 return 0; // It's not worth it. 520 NewGlobals.reserve(NumElements); 521 522 uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType()); 523 unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType()); 524 for (unsigned i = 0, e = NumElements; i != e; ++i) { 525 Constant *In = getAggregateConstantElement(Init, 526 Context->getConstantInt(Type::Int32Ty, i), 527 Context); 528 assert(In && "Couldn't get element of initializer?"); 529 530 GlobalVariable *NGV = new GlobalVariable(*Context, 531 STy->getElementType(), false, 532 GlobalVariable::InternalLinkage, 533 In, GV->getName()+"."+utostr(i), 534 GV->isThreadLocal(), 535 GV->getType()->getAddressSpace()); 536 Globals.insert(GV, NGV); 537 NewGlobals.push_back(NGV); 538 539 // Calculate the known alignment of the field. If the original aggregate 540 // had 256 byte alignment for example, something might depend on that: 541 // propagate info to each field. 542 unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i); 543 if (NewAlign > EltAlign) 544 NGV->setAlignment(NewAlign); 545 } 546 } 547 548 if (NewGlobals.empty()) 549 return 0; 550 551 DOUT << "PERFORMING GLOBAL SRA ON: " << *GV; 552 553 Constant *NullInt = Context->getNullValue(Type::Int32Ty); 554 555 // Loop over all of the uses of the global, replacing the constantexpr geps, 556 // with smaller constantexpr geps or direct references. 557 while (!GV->use_empty()) { 558 User *GEP = GV->use_back(); 559 assert(((isa<ConstantExpr>(GEP) && 560 cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)|| 561 isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!"); 562 563 // Ignore the 1th operand, which has to be zero or else the program is quite 564 // broken (undefined). Get the 2nd operand, which is the structure or array 565 // index. 566 unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue(); 567 if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access. 568 569 Value *NewPtr = NewGlobals[Val]; 570 571 // Form a shorter GEP if needed. 572 if (GEP->getNumOperands() > 3) { 573 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) { 574 SmallVector<Constant*, 8> Idxs; 575 Idxs.push_back(NullInt); 576 for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i) 577 Idxs.push_back(CE->getOperand(i)); 578 NewPtr = Context->getConstantExprGetElementPtr(cast<Constant>(NewPtr), 579 &Idxs[0], Idxs.size()); 580 } else { 581 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP); 582 SmallVector<Value*, 8> Idxs; 583 Idxs.push_back(NullInt); 584 for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) 585 Idxs.push_back(GEPI->getOperand(i)); 586 NewPtr = GetElementPtrInst::Create(NewPtr, Idxs.begin(), Idxs.end(), 587 GEPI->getName()+"."+utostr(Val), GEPI); 588 } 589 } 590 GEP->replaceAllUsesWith(NewPtr); 591 592 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP)) 593 GEPI->eraseFromParent(); 594 else 595 cast<ConstantExpr>(GEP)->destroyConstant(); 596 } 597 598 // Delete the old global, now that it is dead. 599 Globals.erase(GV); 600 ++NumSRA; 601 602 // Loop over the new globals array deleting any globals that are obviously 603 // dead. This can arise due to scalarization of a structure or an array that 604 // has elements that are dead. 605 unsigned FirstGlobal = 0; 606 for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i) 607 if (NewGlobals[i]->use_empty()) { 608 Globals.erase(NewGlobals[i]); 609 if (FirstGlobal == i) ++FirstGlobal; 610 } 611 612 return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0; 613} 614 615/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified 616/// value will trap if the value is dynamically null. PHIs keeps track of any 617/// phi nodes we've seen to avoid reprocessing them. 618static bool AllUsesOfValueWillTrapIfNull(Value *V, 619 SmallPtrSet<PHINode*, 8> &PHIs) { 620 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI) 621 if (isa<LoadInst>(*UI)) { 622 // Will trap. 623 } else if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) { 624 if (SI->getOperand(0) == V) { 625 //cerr << "NONTRAPPING USE: " << **UI; 626 return false; // Storing the value. 627 } 628 } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) { 629 if (CI->getOperand(0) != V) { 630 //cerr << "NONTRAPPING USE: " << **UI; 631 return false; // Not calling the ptr 632 } 633 } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) { 634 if (II->getOperand(0) != V) { 635 //cerr << "NONTRAPPING USE: " << **UI; 636 return false; // Not calling the ptr 637 } 638 } else if (BitCastInst *CI = dyn_cast<BitCastInst>(*UI)) { 639 if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false; 640 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI)) { 641 if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false; 642 } else if (PHINode *PN = dyn_cast<PHINode>(*UI)) { 643 // If we've already seen this phi node, ignore it, it has already been 644 // checked. 645 if (PHIs.insert(PN)) 646 return AllUsesOfValueWillTrapIfNull(PN, PHIs); 647 } else if (isa<ICmpInst>(*UI) && 648 isa<ConstantPointerNull>(UI->getOperand(1))) { 649 // Ignore setcc X, null 650 } else { 651 //cerr << "NONTRAPPING USE: " << **UI; 652 return false; 653 } 654 return true; 655} 656 657/// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads 658/// from GV will trap if the loaded value is null. Note that this also permits 659/// comparisons of the loaded value against null, as a special case. 660static bool AllUsesOfLoadedValueWillTrapIfNull(GlobalVariable *GV) { 661 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI!=E; ++UI) 662 if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) { 663 SmallPtrSet<PHINode*, 8> PHIs; 664 if (!AllUsesOfValueWillTrapIfNull(LI, PHIs)) 665 return false; 666 } else if (isa<StoreInst>(*UI)) { 667 // Ignore stores to the global. 668 } else { 669 // We don't know or understand this user, bail out. 670 //cerr << "UNKNOWN USER OF GLOBAL!: " << **UI; 671 return false; 672 } 673 674 return true; 675} 676 677static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV, 678 LLVMContext *Context) { 679 bool Changed = false; 680 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) { 681 Instruction *I = cast<Instruction>(*UI++); 682 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 683 LI->setOperand(0, NewV); 684 Changed = true; 685 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 686 if (SI->getOperand(1) == V) { 687 SI->setOperand(1, NewV); 688 Changed = true; 689 } 690 } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) { 691 if (I->getOperand(0) == V) { 692 // Calling through the pointer! Turn into a direct call, but be careful 693 // that the pointer is not also being passed as an argument. 694 I->setOperand(0, NewV); 695 Changed = true; 696 bool PassedAsArg = false; 697 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i) 698 if (I->getOperand(i) == V) { 699 PassedAsArg = true; 700 I->setOperand(i, NewV); 701 } 702 703 if (PassedAsArg) { 704 // Being passed as an argument also. Be careful to not invalidate UI! 705 UI = V->use_begin(); 706 } 707 } 708 } else if (CastInst *CI = dyn_cast<CastInst>(I)) { 709 Changed |= OptimizeAwayTrappingUsesOfValue(CI, 710 Context->getConstantExprCast(CI->getOpcode(), 711 NewV, CI->getType()), Context); 712 if (CI->use_empty()) { 713 Changed = true; 714 CI->eraseFromParent(); 715 } 716 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { 717 // Should handle GEP here. 718 SmallVector<Constant*, 8> Idxs; 719 Idxs.reserve(GEPI->getNumOperands()-1); 720 for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end(); 721 i != e; ++i) 722 if (Constant *C = dyn_cast<Constant>(*i)) 723 Idxs.push_back(C); 724 else 725 break; 726 if (Idxs.size() == GEPI->getNumOperands()-1) 727 Changed |= OptimizeAwayTrappingUsesOfValue(GEPI, 728 Context->getConstantExprGetElementPtr(NewV, &Idxs[0], 729 Idxs.size()), Context); 730 if (GEPI->use_empty()) { 731 Changed = true; 732 GEPI->eraseFromParent(); 733 } 734 } 735 } 736 737 return Changed; 738} 739 740 741/// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null 742/// value stored into it. If there are uses of the loaded value that would trap 743/// if the loaded value is dynamically null, then we know that they cannot be 744/// reachable with a null optimize away the load. 745static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV, 746 LLVMContext *Context) { 747 bool Changed = false; 748 749 // Keep track of whether we are able to remove all the uses of the global 750 // other than the store that defines it. 751 bool AllNonStoreUsesGone = true; 752 753 // Replace all uses of loads with uses of uses of the stored value. 754 for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){ 755 User *GlobalUser = *GUI++; 756 if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) { 757 Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV, Context); 758 // If we were able to delete all uses of the loads 759 if (LI->use_empty()) { 760 LI->eraseFromParent(); 761 Changed = true; 762 } else { 763 AllNonStoreUsesGone = false; 764 } 765 } else if (isa<StoreInst>(GlobalUser)) { 766 // Ignore the store that stores "LV" to the global. 767 assert(GlobalUser->getOperand(1) == GV && 768 "Must be storing *to* the global"); 769 } else { 770 AllNonStoreUsesGone = false; 771 772 // If we get here we could have other crazy uses that are transitively 773 // loaded. 774 assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) || 775 isa<ConstantExpr>(GlobalUser)) && "Only expect load and stores!"); 776 } 777 } 778 779 if (Changed) { 780 DOUT << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV; 781 ++NumGlobUses; 782 } 783 784 // If we nuked all of the loads, then none of the stores are needed either, 785 // nor is the global. 786 if (AllNonStoreUsesGone) { 787 DOUT << " *** GLOBAL NOW DEAD!\n"; 788 CleanupConstantGlobalUsers(GV, 0, Context); 789 if (GV->use_empty()) { 790 GV->eraseFromParent(); 791 ++NumDeleted; 792 } 793 Changed = true; 794 } 795 return Changed; 796} 797 798/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the 799/// instructions that are foldable. 800static void ConstantPropUsersOf(Value *V, LLVMContext *Context) { 801 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) 802 if (Instruction *I = dyn_cast<Instruction>(*UI++)) 803 if (Constant *NewC = ConstantFoldInstruction(I, Context)) { 804 I->replaceAllUsesWith(NewC); 805 806 // Advance UI to the next non-I use to avoid invalidating it! 807 // Instructions could multiply use V. 808 while (UI != E && *UI == I) 809 ++UI; 810 I->eraseFromParent(); 811 } 812} 813 814/// OptimizeGlobalAddressOfMalloc - This function takes the specified global 815/// variable, and transforms the program as if it always contained the result of 816/// the specified malloc. Because it is always the result of the specified 817/// malloc, there is no reason to actually DO the malloc. Instead, turn the 818/// malloc into a global, and any loads of GV as uses of the new global. 819static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, 820 MallocInst *MI, 821 LLVMContext *Context) { 822 DOUT << "PROMOTING MALLOC GLOBAL: " << *GV << " MALLOC = " << *MI; 823 ConstantInt *NElements = cast<ConstantInt>(MI->getArraySize()); 824 825 if (NElements->getZExtValue() != 1) { 826 // If we have an array allocation, transform it to a single element 827 // allocation to make the code below simpler. 828 Type *NewTy = Context->getArrayType(MI->getAllocatedType(), 829 NElements->getZExtValue()); 830 MallocInst *NewMI = 831 new MallocInst(*Context, NewTy, Context->getNullValue(Type::Int32Ty), 832 MI->getAlignment(), MI->getName(), MI); 833 Value* Indices[2]; 834 Indices[0] = Indices[1] = Context->getNullValue(Type::Int32Ty); 835 Value *NewGEP = GetElementPtrInst::Create(NewMI, Indices, Indices + 2, 836 NewMI->getName()+".el0", MI); 837 MI->replaceAllUsesWith(NewGEP); 838 MI->eraseFromParent(); 839 MI = NewMI; 840 } 841 842 // Create the new global variable. The contents of the malloc'd memory is 843 // undefined, so initialize with an undef value. 844 // FIXME: This new global should have the alignment returned by malloc. Code 845 // could depend on malloc returning large alignment (on the mac, 16 bytes) but 846 // this would only guarantee some lower alignment. 847 Constant *Init = Context->getUndef(MI->getAllocatedType()); 848 GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(), 849 MI->getAllocatedType(), false, 850 GlobalValue::InternalLinkage, Init, 851 GV->getName()+".body", 852 GV, 853 GV->isThreadLocal()); 854 855 // Anything that used the malloc now uses the global directly. 856 MI->replaceAllUsesWith(NewGV); 857 858 Constant *RepValue = NewGV; 859 if (NewGV->getType() != GV->getType()->getElementType()) 860 RepValue = Context->getConstantExprBitCast(RepValue, 861 GV->getType()->getElementType()); 862 863 // If there is a comparison against null, we will insert a global bool to 864 // keep track of whether the global was initialized yet or not. 865 GlobalVariable *InitBool = 866 new GlobalVariable(*Context, Type::Int1Ty, false, 867 GlobalValue::InternalLinkage, 868 Context->getConstantIntFalse(), GV->getName()+".init", 869 GV->isThreadLocal()); 870 bool InitBoolUsed = false; 871 872 // Loop over all uses of GV, processing them in turn. 873 std::vector<StoreInst*> Stores; 874 while (!GV->use_empty()) 875 if (LoadInst *LI = dyn_cast<LoadInst>(GV->use_back())) { 876 while (!LI->use_empty()) { 877 Use &LoadUse = LI->use_begin().getUse(); 878 if (!isa<ICmpInst>(LoadUse.getUser())) 879 LoadUse = RepValue; 880 else { 881 ICmpInst *CI = cast<ICmpInst>(LoadUse.getUser()); 882 // Replace the cmp X, 0 with a use of the bool value. 883 Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", CI); 884 InitBoolUsed = true; 885 switch (CI->getPredicate()) { 886 default: llvm_unreachable("Unknown ICmp Predicate!"); 887 case ICmpInst::ICMP_ULT: 888 case ICmpInst::ICMP_SLT: 889 LV = Context->getConstantIntFalse(); // X < null -> always false 890 break; 891 case ICmpInst::ICMP_ULE: 892 case ICmpInst::ICMP_SLE: 893 case ICmpInst::ICMP_EQ: 894 LV = BinaryOperator::CreateNot(*Context, LV, "notinit", CI); 895 break; 896 case ICmpInst::ICMP_NE: 897 case ICmpInst::ICMP_UGE: 898 case ICmpInst::ICMP_SGE: 899 case ICmpInst::ICMP_UGT: 900 case ICmpInst::ICMP_SGT: 901 break; // no change. 902 } 903 CI->replaceAllUsesWith(LV); 904 CI->eraseFromParent(); 905 } 906 } 907 LI->eraseFromParent(); 908 } else { 909 StoreInst *SI = cast<StoreInst>(GV->use_back()); 910 // The global is initialized when the store to it occurs. 911 new StoreInst(Context->getConstantIntTrue(), InitBool, SI); 912 SI->eraseFromParent(); 913 } 914 915 // If the initialization boolean was used, insert it, otherwise delete it. 916 if (!InitBoolUsed) { 917 while (!InitBool->use_empty()) // Delete initializations 918 cast<Instruction>(InitBool->use_back())->eraseFromParent(); 919 delete InitBool; 920 } else 921 GV->getParent()->getGlobalList().insert(GV, InitBool); 922 923 924 // Now the GV is dead, nuke it and the malloc. 925 GV->eraseFromParent(); 926 MI->eraseFromParent(); 927 928 // To further other optimizations, loop over all users of NewGV and try to 929 // constant prop them. This will promote GEP instructions with constant 930 // indices into GEP constant-exprs, which will allow global-opt to hack on it. 931 ConstantPropUsersOf(NewGV, Context); 932 if (RepValue != NewGV) 933 ConstantPropUsersOf(RepValue, Context); 934 935 return NewGV; 936} 937 938/// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking 939/// to make sure that there are no complex uses of V. We permit simple things 940/// like dereferencing the pointer, but not storing through the address, unless 941/// it is to the specified global. 942static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Instruction *V, 943 GlobalVariable *GV, 944 SmallPtrSet<PHINode*, 8> &PHIs) { 945 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){ 946 Instruction *Inst = cast<Instruction>(*UI); 947 948 if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) { 949 continue; // Fine, ignore. 950 } 951 952 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 953 if (SI->getOperand(0) == V && SI->getOperand(1) != GV) 954 return false; // Storing the pointer itself... bad. 955 continue; // Otherwise, storing through it, or storing into GV... fine. 956 } 957 958 if (isa<GetElementPtrInst>(Inst)) { 959 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs)) 960 return false; 961 continue; 962 } 963 964 if (PHINode *PN = dyn_cast<PHINode>(Inst)) { 965 // PHIs are ok if all uses are ok. Don't infinitely recurse through PHI 966 // cycles. 967 if (PHIs.insert(PN)) 968 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs)) 969 return false; 970 continue; 971 } 972 973 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) { 974 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs)) 975 return false; 976 continue; 977 } 978 979 return false; 980 } 981 return true; 982} 983 984/// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV 985/// somewhere. Transform all uses of the allocation into loads from the 986/// global and uses of the resultant pointer. Further, delete the store into 987/// GV. This assumes that these value pass the 988/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate. 989static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc, 990 GlobalVariable *GV) { 991 while (!Alloc->use_empty()) { 992 Instruction *U = cast<Instruction>(*Alloc->use_begin()); 993 Instruction *InsertPt = U; 994 if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 995 // If this is the store of the allocation into the global, remove it. 996 if (SI->getOperand(1) == GV) { 997 SI->eraseFromParent(); 998 continue; 999 } 1000 } else if (PHINode *PN = dyn_cast<PHINode>(U)) { 1001 // Insert the load in the corresponding predecessor, not right before the 1002 // PHI. 1003 InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator(); 1004 } else if (isa<BitCastInst>(U)) { 1005 // Must be bitcast between the malloc and store to initialize the global. 1006 ReplaceUsesOfMallocWithGlobal(U, GV); 1007 U->eraseFromParent(); 1008 continue; 1009 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) { 1010 // If this is a "GEP bitcast" and the user is a store to the global, then 1011 // just process it as a bitcast. 1012 if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse()) 1013 if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back())) 1014 if (SI->getOperand(1) == GV) { 1015 // Must be bitcast GEP between the malloc and store to initialize 1016 // the global. 1017 ReplaceUsesOfMallocWithGlobal(GEPI, GV); 1018 GEPI->eraseFromParent(); 1019 continue; 1020 } 1021 } 1022 1023 // Insert a load from the global, and use it instead of the malloc. 1024 Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt); 1025 U->replaceUsesOfWith(Alloc, NL); 1026 } 1027} 1028 1029/// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi 1030/// of a load) are simple enough to perform heap SRA on. This permits GEP's 1031/// that index through the array and struct field, icmps of null, and PHIs. 1032static bool LoadUsesSimpleEnoughForHeapSRA(Value *V, 1033 SmallPtrSet<PHINode*, 32> &LoadUsingPHIs, 1034 SmallPtrSet<PHINode*, 32> &LoadUsingPHIsPerLoad) { 1035 // We permit two users of the load: setcc comparing against the null 1036 // pointer, and a getelementptr of a specific form. 1037 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){ 1038 Instruction *User = cast<Instruction>(*UI); 1039 1040 // Comparison against null is ok. 1041 if (ICmpInst *ICI = dyn_cast<ICmpInst>(User)) { 1042 if (!isa<ConstantPointerNull>(ICI->getOperand(1))) 1043 return false; 1044 continue; 1045 } 1046 1047 // getelementptr is also ok, but only a simple form. 1048 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) { 1049 // Must index into the array and into the struct. 1050 if (GEPI->getNumOperands() < 3) 1051 return false; 1052 1053 // Otherwise the GEP is ok. 1054 continue; 1055 } 1056 1057 if (PHINode *PN = dyn_cast<PHINode>(User)) { 1058 if (!LoadUsingPHIsPerLoad.insert(PN)) 1059 // This means some phi nodes are dependent on each other. 1060 // Avoid infinite looping! 1061 return false; 1062 if (!LoadUsingPHIs.insert(PN)) 1063 // If we have already analyzed this PHI, then it is safe. 1064 continue; 1065 1066 // Make sure all uses of the PHI are simple enough to transform. 1067 if (!LoadUsesSimpleEnoughForHeapSRA(PN, 1068 LoadUsingPHIs, LoadUsingPHIsPerLoad)) 1069 return false; 1070 1071 continue; 1072 } 1073 1074 // Otherwise we don't know what this is, not ok. 1075 return false; 1076 } 1077 1078 return true; 1079} 1080 1081 1082/// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from 1083/// GV are simple enough to perform HeapSRA, return true. 1084static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(GlobalVariable *GV, 1085 MallocInst *MI) { 1086 SmallPtrSet<PHINode*, 32> LoadUsingPHIs; 1087 SmallPtrSet<PHINode*, 32> LoadUsingPHIsPerLoad; 1088 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E; 1089 ++UI) 1090 if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) { 1091 if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs, 1092 LoadUsingPHIsPerLoad)) 1093 return false; 1094 LoadUsingPHIsPerLoad.clear(); 1095 } 1096 1097 // If we reach here, we know that all uses of the loads and transitive uses 1098 // (through PHI nodes) are simple enough to transform. However, we don't know 1099 // that all inputs the to the PHI nodes are in the same equivalence sets. 1100 // Check to verify that all operands of the PHIs are either PHIS that can be 1101 // transformed, loads from GV, or MI itself. 1102 for (SmallPtrSet<PHINode*, 32>::iterator I = LoadUsingPHIs.begin(), 1103 E = LoadUsingPHIs.end(); I != E; ++I) { 1104 PHINode *PN = *I; 1105 for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) { 1106 Value *InVal = PN->getIncomingValue(op); 1107 1108 // PHI of the stored value itself is ok. 1109 if (InVal == MI) continue; 1110 1111 if (PHINode *InPN = dyn_cast<PHINode>(InVal)) { 1112 // One of the PHIs in our set is (optimistically) ok. 1113 if (LoadUsingPHIs.count(InPN)) 1114 continue; 1115 return false; 1116 } 1117 1118 // Load from GV is ok. 1119 if (LoadInst *LI = dyn_cast<LoadInst>(InVal)) 1120 if (LI->getOperand(0) == GV) 1121 continue; 1122 1123 // UNDEF? NULL? 1124 1125 // Anything else is rejected. 1126 return false; 1127 } 1128 } 1129 1130 return true; 1131} 1132 1133static Value *GetHeapSROAValue(Value *V, unsigned FieldNo, 1134 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 1135 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite, 1136 LLVMContext *Context) { 1137 std::vector<Value*> &FieldVals = InsertedScalarizedValues[V]; 1138 1139 if (FieldNo >= FieldVals.size()) 1140 FieldVals.resize(FieldNo+1); 1141 1142 // If we already have this value, just reuse the previously scalarized 1143 // version. 1144 if (Value *FieldVal = FieldVals[FieldNo]) 1145 return FieldVal; 1146 1147 // Depending on what instruction this is, we have several cases. 1148 Value *Result; 1149 if (LoadInst *LI = dyn_cast<LoadInst>(V)) { 1150 // This is a scalarized version of the load from the global. Just create 1151 // a new Load of the scalarized global. 1152 Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo, 1153 InsertedScalarizedValues, 1154 PHIsToRewrite, Context), 1155 LI->getName()+".f" + utostr(FieldNo), LI); 1156 } else if (PHINode *PN = dyn_cast<PHINode>(V)) { 1157 // PN's type is pointer to struct. Make a new PHI of pointer to struct 1158 // field. 1159 const StructType *ST = 1160 cast<StructType>(cast<PointerType>(PN->getType())->getElementType()); 1161 1162 Result = 1163 PHINode::Create(Context->getPointerTypeUnqual(ST->getElementType(FieldNo)), 1164 PN->getName()+".f"+utostr(FieldNo), PN); 1165 PHIsToRewrite.push_back(std::make_pair(PN, FieldNo)); 1166 } else { 1167 llvm_unreachable("Unknown usable value"); 1168 Result = 0; 1169 } 1170 1171 return FieldVals[FieldNo] = Result; 1172} 1173 1174/// RewriteHeapSROALoadUser - Given a load instruction and a value derived from 1175/// the load, rewrite the derived value to use the HeapSRoA'd load. 1176static void RewriteHeapSROALoadUser(Instruction *LoadUser, 1177 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 1178 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite, 1179 LLVMContext *Context) { 1180 // If this is a comparison against null, handle it. 1181 if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) { 1182 assert(isa<ConstantPointerNull>(SCI->getOperand(1))); 1183 // If we have a setcc of the loaded pointer, we can use a setcc of any 1184 // field. 1185 Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0, 1186 InsertedScalarizedValues, PHIsToRewrite, 1187 Context); 1188 1189 Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr, 1190 Context->getNullValue(NPtr->getType()), 1191 SCI->getName()); 1192 SCI->replaceAllUsesWith(New); 1193 SCI->eraseFromParent(); 1194 return; 1195 } 1196 1197 // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...' 1198 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) { 1199 assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2)) 1200 && "Unexpected GEPI!"); 1201 1202 // Load the pointer for this field. 1203 unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue(); 1204 Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo, 1205 InsertedScalarizedValues, PHIsToRewrite, 1206 Context); 1207 1208 // Create the new GEP idx vector. 1209 SmallVector<Value*, 8> GEPIdx; 1210 GEPIdx.push_back(GEPI->getOperand(1)); 1211 GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end()); 1212 1213 Value *NGEPI = GetElementPtrInst::Create(NewPtr, 1214 GEPIdx.begin(), GEPIdx.end(), 1215 GEPI->getName(), GEPI); 1216 GEPI->replaceAllUsesWith(NGEPI); 1217 GEPI->eraseFromParent(); 1218 return; 1219 } 1220 1221 // Recursively transform the users of PHI nodes. This will lazily create the 1222 // PHIs that are needed for individual elements. Keep track of what PHIs we 1223 // see in InsertedScalarizedValues so that we don't get infinite loops (very 1224 // antisocial). If the PHI is already in InsertedScalarizedValues, it has 1225 // already been seen first by another load, so its uses have already been 1226 // processed. 1227 PHINode *PN = cast<PHINode>(LoadUser); 1228 bool Inserted; 1229 DenseMap<Value*, std::vector<Value*> >::iterator InsertPos; 1230 tie(InsertPos, Inserted) = 1231 InsertedScalarizedValues.insert(std::make_pair(PN, std::vector<Value*>())); 1232 if (!Inserted) return; 1233 1234 // If this is the first time we've seen this PHI, recursively process all 1235 // users. 1236 for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) { 1237 Instruction *User = cast<Instruction>(*UI++); 1238 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite, 1239 Context); 1240 } 1241} 1242 1243/// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global. Ptr 1244/// is a value loaded from the global. Eliminate all uses of Ptr, making them 1245/// use FieldGlobals instead. All uses of loaded values satisfy 1246/// AllGlobalLoadUsesSimpleEnoughForHeapSRA. 1247static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load, 1248 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 1249 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite, 1250 LLVMContext *Context) { 1251 for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end(); 1252 UI != E; ) { 1253 Instruction *User = cast<Instruction>(*UI++); 1254 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite, 1255 Context); 1256 } 1257 1258 if (Load->use_empty()) { 1259 Load->eraseFromParent(); 1260 InsertedScalarizedValues.erase(Load); 1261 } 1262} 1263 1264/// PerformHeapAllocSRoA - MI is an allocation of an array of structures. Break 1265/// it up into multiple allocations of arrays of the fields. 1266static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, MallocInst *MI, 1267 LLVMContext *Context){ 1268 DOUT << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *MI; 1269 const StructType *STy = cast<StructType>(MI->getAllocatedType()); 1270 1271 // There is guaranteed to be at least one use of the malloc (storing 1272 // it into GV). If there are other uses, change them to be uses of 1273 // the global to simplify later code. This also deletes the store 1274 // into GV. 1275 ReplaceUsesOfMallocWithGlobal(MI, GV); 1276 1277 // Okay, at this point, there are no users of the malloc. Insert N 1278 // new mallocs at the same place as MI, and N globals. 1279 std::vector<Value*> FieldGlobals; 1280 std::vector<MallocInst*> FieldMallocs; 1281 1282 for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){ 1283 const Type *FieldTy = STy->getElementType(FieldNo); 1284 const Type *PFieldTy = Context->getPointerTypeUnqual(FieldTy); 1285 1286 GlobalVariable *NGV = 1287 new GlobalVariable(*GV->getParent(), 1288 PFieldTy, false, GlobalValue::InternalLinkage, 1289 Context->getNullValue(PFieldTy), 1290 GV->getName() + ".f" + utostr(FieldNo), GV, 1291 GV->isThreadLocal()); 1292 FieldGlobals.push_back(NGV); 1293 1294 MallocInst *NMI = new MallocInst(*Context, FieldTy, MI->getArraySize(), 1295 MI->getName() + ".f" + utostr(FieldNo),MI); 1296 FieldMallocs.push_back(NMI); 1297 new StoreInst(NMI, NGV, MI); 1298 } 1299 1300 // The tricky aspect of this transformation is handling the case when malloc 1301 // fails. In the original code, malloc failing would set the result pointer 1302 // of malloc to null. In this case, some mallocs could succeed and others 1303 // could fail. As such, we emit code that looks like this: 1304 // F0 = malloc(field0) 1305 // F1 = malloc(field1) 1306 // F2 = malloc(field2) 1307 // if (F0 == 0 || F1 == 0 || F2 == 0) { 1308 // if (F0) { free(F0); F0 = 0; } 1309 // if (F1) { free(F1); F1 = 0; } 1310 // if (F2) { free(F2); F2 = 0; } 1311 // } 1312 Value *RunningOr = 0; 1313 for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) { 1314 Value *Cond = new ICmpInst(MI, ICmpInst::ICMP_EQ, FieldMallocs[i], 1315 Context->getNullValue(FieldMallocs[i]->getType()), 1316 "isnull"); 1317 if (!RunningOr) 1318 RunningOr = Cond; // First seteq 1319 else 1320 RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", MI); 1321 } 1322 1323 // Split the basic block at the old malloc. 1324 BasicBlock *OrigBB = MI->getParent(); 1325 BasicBlock *ContBB = OrigBB->splitBasicBlock(MI, "malloc_cont"); 1326 1327 // Create the block to check the first condition. Put all these blocks at the 1328 // end of the function as they are unlikely to be executed. 1329 BasicBlock *NullPtrBlock = BasicBlock::Create("malloc_ret_null", 1330 OrigBB->getParent()); 1331 1332 // Remove the uncond branch from OrigBB to ContBB, turning it into a cond 1333 // branch on RunningOr. 1334 OrigBB->getTerminator()->eraseFromParent(); 1335 BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB); 1336 1337 // Within the NullPtrBlock, we need to emit a comparison and branch for each 1338 // pointer, because some may be null while others are not. 1339 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { 1340 Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock); 1341 Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal, 1342 Context->getNullValue(GVVal->getType()), 1343 "tmp"); 1344 BasicBlock *FreeBlock = BasicBlock::Create("free_it", OrigBB->getParent()); 1345 BasicBlock *NextBlock = BasicBlock::Create("next", OrigBB->getParent()); 1346 BranchInst::Create(FreeBlock, NextBlock, Cmp, NullPtrBlock); 1347 1348 // Fill in FreeBlock. 1349 new FreeInst(GVVal, FreeBlock); 1350 new StoreInst(Context->getNullValue(GVVal->getType()), FieldGlobals[i], 1351 FreeBlock); 1352 BranchInst::Create(NextBlock, FreeBlock); 1353 1354 NullPtrBlock = NextBlock; 1355 } 1356 1357 BranchInst::Create(ContBB, NullPtrBlock); 1358 1359 // MI is no longer needed, remove it. 1360 MI->eraseFromParent(); 1361 1362 /// InsertedScalarizedLoads - As we process loads, if we can't immediately 1363 /// update all uses of the load, keep track of what scalarized loads are 1364 /// inserted for a given load. 1365 DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues; 1366 InsertedScalarizedValues[GV] = FieldGlobals; 1367 1368 std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite; 1369 1370 // Okay, the malloc site is completely handled. All of the uses of GV are now 1371 // loads, and all uses of those loads are simple. Rewrite them to use loads 1372 // of the per-field globals instead. 1373 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) { 1374 Instruction *User = cast<Instruction>(*UI++); 1375 1376 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 1377 RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite, 1378 Context); 1379 continue; 1380 } 1381 1382 // Must be a store of null. 1383 StoreInst *SI = cast<StoreInst>(User); 1384 assert(isa<ConstantPointerNull>(SI->getOperand(0)) && 1385 "Unexpected heap-sra user!"); 1386 1387 // Insert a store of null into each global. 1388 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { 1389 const PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType()); 1390 Constant *Null = Context->getNullValue(PT->getElementType()); 1391 new StoreInst(Null, FieldGlobals[i], SI); 1392 } 1393 // Erase the original store. 1394 SI->eraseFromParent(); 1395 } 1396 1397 // While we have PHIs that are interesting to rewrite, do it. 1398 while (!PHIsToRewrite.empty()) { 1399 PHINode *PN = PHIsToRewrite.back().first; 1400 unsigned FieldNo = PHIsToRewrite.back().second; 1401 PHIsToRewrite.pop_back(); 1402 PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]); 1403 assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi"); 1404 1405 // Add all the incoming values. This can materialize more phis. 1406 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 1407 Value *InVal = PN->getIncomingValue(i); 1408 InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues, 1409 PHIsToRewrite, Context); 1410 FieldPN->addIncoming(InVal, PN->getIncomingBlock(i)); 1411 } 1412 } 1413 1414 // Drop all inter-phi links and any loads that made it this far. 1415 for (DenseMap<Value*, std::vector<Value*> >::iterator 1416 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); 1417 I != E; ++I) { 1418 if (PHINode *PN = dyn_cast<PHINode>(I->first)) 1419 PN->dropAllReferences(); 1420 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) 1421 LI->dropAllReferences(); 1422 } 1423 1424 // Delete all the phis and loads now that inter-references are dead. 1425 for (DenseMap<Value*, std::vector<Value*> >::iterator 1426 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); 1427 I != E; ++I) { 1428 if (PHINode *PN = dyn_cast<PHINode>(I->first)) 1429 PN->eraseFromParent(); 1430 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) 1431 LI->eraseFromParent(); 1432 } 1433 1434 // The old global is now dead, remove it. 1435 GV->eraseFromParent(); 1436 1437 ++NumHeapSRA; 1438 return cast<GlobalVariable>(FieldGlobals[0]); 1439} 1440 1441/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a 1442/// pointer global variable with a single value stored it that is a malloc or 1443/// cast of malloc. 1444static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, 1445 MallocInst *MI, 1446 Module::global_iterator &GVI, 1447 TargetData &TD, 1448 LLVMContext *Context) { 1449 // If this is a malloc of an abstract type, don't touch it. 1450 if (!MI->getAllocatedType()->isSized()) 1451 return false; 1452 1453 // We can't optimize this global unless all uses of it are *known* to be 1454 // of the malloc value, not of the null initializer value (consider a use 1455 // that compares the global's value against zero to see if the malloc has 1456 // been reached). To do this, we check to see if all uses of the global 1457 // would trap if the global were null: this proves that they must all 1458 // happen after the malloc. 1459 if (!AllUsesOfLoadedValueWillTrapIfNull(GV)) 1460 return false; 1461 1462 // We can't optimize this if the malloc itself is used in a complex way, 1463 // for example, being stored into multiple globals. This allows the 1464 // malloc to be stored into the specified global, loaded setcc'd, and 1465 // GEP'd. These are all things we could transform to using the global 1466 // for. 1467 { 1468 SmallPtrSet<PHINode*, 8> PHIs; 1469 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(MI, GV, PHIs)) 1470 return false; 1471 } 1472 1473 1474 // If we have a global that is only initialized with a fixed size malloc, 1475 // transform the program to use global memory instead of malloc'd memory. 1476 // This eliminates dynamic allocation, avoids an indirection accessing the 1477 // data, and exposes the resultant global to further GlobalOpt. 1478 if (ConstantInt *NElements = dyn_cast<ConstantInt>(MI->getArraySize())) { 1479 // Restrict this transformation to only working on small allocations 1480 // (2048 bytes currently), as we don't want to introduce a 16M global or 1481 // something. 1482 if (NElements->getZExtValue()* 1483 TD.getTypeAllocSize(MI->getAllocatedType()) < 2048) { 1484 GVI = OptimizeGlobalAddressOfMalloc(GV, MI, Context); 1485 return true; 1486 } 1487 } 1488 1489 // If the allocation is an array of structures, consider transforming this 1490 // into multiple malloc'd arrays, one for each field. This is basically 1491 // SRoA for malloc'd memory. 1492 const Type *AllocTy = MI->getAllocatedType(); 1493 1494 // If this is an allocation of a fixed size array of structs, analyze as a 1495 // variable size array. malloc [100 x struct],1 -> malloc struct, 100 1496 if (!MI->isArrayAllocation()) 1497 if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy)) 1498 AllocTy = AT->getElementType(); 1499 1500 if (const StructType *AllocSTy = dyn_cast<StructType>(AllocTy)) { 1501 // This the structure has an unreasonable number of fields, leave it 1502 // alone. 1503 if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 && 1504 AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, MI)) { 1505 1506 // If this is a fixed size array, transform the Malloc to be an alloc of 1507 // structs. malloc [100 x struct],1 -> malloc struct, 100 1508 if (const ArrayType *AT = dyn_cast<ArrayType>(MI->getAllocatedType())) { 1509 MallocInst *NewMI = 1510 new MallocInst(*Context, AllocSTy, 1511 Context->getConstantInt(Type::Int32Ty, AT->getNumElements()), 1512 "", MI); 1513 NewMI->takeName(MI); 1514 Value *Cast = new BitCastInst(NewMI, MI->getType(), "tmp", MI); 1515 MI->replaceAllUsesWith(Cast); 1516 MI->eraseFromParent(); 1517 MI = NewMI; 1518 } 1519 1520 GVI = PerformHeapAllocSRoA(GV, MI, Context); 1521 return true; 1522 } 1523 } 1524 1525 return false; 1526} 1527 1528// OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge 1529// that only one value (besides its initializer) is ever stored to the global. 1530static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal, 1531 Module::global_iterator &GVI, 1532 TargetData &TD, LLVMContext *Context) { 1533 // Ignore no-op GEPs and bitcasts. 1534 StoredOnceVal = StoredOnceVal->stripPointerCasts(); 1535 1536 // If we are dealing with a pointer global that is initialized to null and 1537 // only has one (non-null) value stored into it, then we can optimize any 1538 // users of the loaded value (often calls and loads) that would trap if the 1539 // value was null. 1540 if (isa<PointerType>(GV->getInitializer()->getType()) && 1541 GV->getInitializer()->isNullValue()) { 1542 if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) { 1543 if (GV->getInitializer()->getType() != SOVC->getType()) 1544 SOVC = 1545 Context->getConstantExprBitCast(SOVC, GV->getInitializer()->getType()); 1546 1547 // Optimize away any trapping uses of the loaded value. 1548 if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, Context)) 1549 return true; 1550 } else if (MallocInst *MI = dyn_cast<MallocInst>(StoredOnceVal)) { 1551 if (TryToOptimizeStoreOfMallocToGlobal(GV, MI, GVI, TD, Context)) 1552 return true; 1553 } 1554 } 1555 1556 return false; 1557} 1558 1559/// TryToShrinkGlobalToBoolean - At this point, we have learned that the only 1560/// two values ever stored into GV are its initializer and OtherVal. See if we 1561/// can shrink the global into a boolean and select between the two values 1562/// whenever it is used. This exposes the values to other scalar optimizations. 1563static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal, 1564 LLVMContext *Context) { 1565 const Type *GVElType = GV->getType()->getElementType(); 1566 1567 // If GVElType is already i1, it is already shrunk. If the type of the GV is 1568 // an FP value, pointer or vector, don't do this optimization because a select 1569 // between them is very expensive and unlikely to lead to later 1570 // simplification. In these cases, we typically end up with "cond ? v1 : v2" 1571 // where v1 and v2 both require constant pool loads, a big loss. 1572 if (GVElType == Type::Int1Ty || GVElType->isFloatingPoint() || 1573 isa<PointerType>(GVElType) || isa<VectorType>(GVElType)) 1574 return false; 1575 1576 // Walk the use list of the global seeing if all the uses are load or store. 1577 // If there is anything else, bail out. 1578 for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I) 1579 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 1580 return false; 1581 1582 DOUT << " *** SHRINKING TO BOOL: " << *GV; 1583 1584 // Create the new global, initializing it to false. 1585 GlobalVariable *NewGV = new GlobalVariable(*Context, Type::Int1Ty, false, 1586 GlobalValue::InternalLinkage, Context->getConstantIntFalse(), 1587 GV->getName()+".b", 1588 GV->isThreadLocal()); 1589 GV->getParent()->getGlobalList().insert(GV, NewGV); 1590 1591 Constant *InitVal = GV->getInitializer(); 1592 assert(InitVal->getType() != Type::Int1Ty && "No reason to shrink to bool!"); 1593 1594 // If initialized to zero and storing one into the global, we can use a cast 1595 // instead of a select to synthesize the desired value. 1596 bool IsOneZero = false; 1597 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) 1598 IsOneZero = InitVal->isNullValue() && CI->isOne(); 1599 1600 while (!GV->use_empty()) { 1601 Instruction *UI = cast<Instruction>(GV->use_back()); 1602 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { 1603 // Change the store into a boolean store. 1604 bool StoringOther = SI->getOperand(0) == OtherVal; 1605 // Only do this if we weren't storing a loaded value. 1606 Value *StoreVal; 1607 if (StoringOther || SI->getOperand(0) == InitVal) 1608 StoreVal = Context->getConstantInt(Type::Int1Ty, StoringOther); 1609 else { 1610 // Otherwise, we are storing a previously loaded copy. To do this, 1611 // change the copy from copying the original value to just copying the 1612 // bool. 1613 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0)); 1614 1615 // If we're already replaced the input, StoredVal will be a cast or 1616 // select instruction. If not, it will be a load of the original 1617 // global. 1618 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) { 1619 assert(LI->getOperand(0) == GV && "Not a copy!"); 1620 // Insert a new load, to preserve the saved value. 1621 StoreVal = new LoadInst(NewGV, LI->getName()+".b", LI); 1622 } else { 1623 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) && 1624 "This is not a form that we understand!"); 1625 StoreVal = StoredVal->getOperand(0); 1626 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!"); 1627 } 1628 } 1629 new StoreInst(StoreVal, NewGV, SI); 1630 } else { 1631 // Change the load into a load of bool then a select. 1632 LoadInst *LI = cast<LoadInst>(UI); 1633 LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", LI); 1634 Value *NSI; 1635 if (IsOneZero) 1636 NSI = new ZExtInst(NLI, LI->getType(), "", LI); 1637 else 1638 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI); 1639 NSI->takeName(LI); 1640 LI->replaceAllUsesWith(NSI); 1641 } 1642 UI->eraseFromParent(); 1643 } 1644 1645 GV->eraseFromParent(); 1646 return true; 1647} 1648 1649 1650/// ProcessInternalGlobal - Analyze the specified global variable and optimize 1651/// it if possible. If we make a change, return true. 1652bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV, 1653 Module::global_iterator &GVI) { 1654 SmallPtrSet<PHINode*, 16> PHIUsers; 1655 GlobalStatus GS; 1656 GV->removeDeadConstantUsers(); 1657 1658 if (GV->use_empty()) { 1659 DOUT << "GLOBAL DEAD: " << *GV; 1660 GV->eraseFromParent(); 1661 ++NumDeleted; 1662 return true; 1663 } 1664 1665 if (!AnalyzeGlobal(GV, GS, PHIUsers)) { 1666#if 0 1667 cerr << "Global: " << *GV; 1668 cerr << " isLoaded = " << GS.isLoaded << "\n"; 1669 cerr << " StoredType = "; 1670 switch (GS.StoredType) { 1671 case GlobalStatus::NotStored: cerr << "NEVER STORED\n"; break; 1672 case GlobalStatus::isInitializerStored: cerr << "INIT STORED\n"; break; 1673 case GlobalStatus::isStoredOnce: cerr << "STORED ONCE\n"; break; 1674 case GlobalStatus::isStored: cerr << "stored\n"; break; 1675 } 1676 if (GS.StoredType == GlobalStatus::isStoredOnce && GS.StoredOnceValue) 1677 cerr << " StoredOnceValue = " << *GS.StoredOnceValue << "\n"; 1678 if (GS.AccessingFunction && !GS.HasMultipleAccessingFunctions) 1679 cerr << " AccessingFunction = " << GS.AccessingFunction->getName() 1680 << "\n"; 1681 cerr << " HasMultipleAccessingFunctions = " 1682 << GS.HasMultipleAccessingFunctions << "\n"; 1683 cerr << " HasNonInstructionUser = " << GS.HasNonInstructionUser<<"\n"; 1684 cerr << "\n"; 1685#endif 1686 1687 // If this is a first class global and has only one accessing function 1688 // and this function is main (which we know is not recursive we can make 1689 // this global a local variable) we replace the global with a local alloca 1690 // in this function. 1691 // 1692 // NOTE: It doesn't make sense to promote non single-value types since we 1693 // are just replacing static memory to stack memory. 1694 // 1695 // If the global is in different address space, don't bring it to stack. 1696 if (!GS.HasMultipleAccessingFunctions && 1697 GS.AccessingFunction && !GS.HasNonInstructionUser && 1698 GV->getType()->getElementType()->isSingleValueType() && 1699 GS.AccessingFunction->getName() == "main" && 1700 GS.AccessingFunction->hasExternalLinkage() && 1701 GV->getType()->getAddressSpace() == 0) { 1702 DOUT << "LOCALIZING GLOBAL: " << *GV; 1703 Instruction* FirstI = GS.AccessingFunction->getEntryBlock().begin(); 1704 const Type* ElemTy = GV->getType()->getElementType(); 1705 // FIXME: Pass Global's alignment when globals have alignment 1706 AllocaInst* Alloca = new AllocaInst(*Context, ElemTy, NULL, 1707 GV->getName(), FirstI); 1708 if (!isa<UndefValue>(GV->getInitializer())) 1709 new StoreInst(GV->getInitializer(), Alloca, FirstI); 1710 1711 GV->replaceAllUsesWith(Alloca); 1712 GV->eraseFromParent(); 1713 ++NumLocalized; 1714 return true; 1715 } 1716 1717 // If the global is never loaded (but may be stored to), it is dead. 1718 // Delete it now. 1719 if (!GS.isLoaded) { 1720 DOUT << "GLOBAL NEVER LOADED: " << *GV; 1721 1722 // Delete any stores we can find to the global. We may not be able to 1723 // make it completely dead though. 1724 bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), 1725 Context); 1726 1727 // If the global is dead now, delete it. 1728 if (GV->use_empty()) { 1729 GV->eraseFromParent(); 1730 ++NumDeleted; 1731 Changed = true; 1732 } 1733 return Changed; 1734 1735 } else if (GS.StoredType <= GlobalStatus::isInitializerStored) { 1736 DOUT << "MARKING CONSTANT: " << *GV; 1737 GV->setConstant(true); 1738 1739 // Clean up any obviously simplifiable users now. 1740 CleanupConstantGlobalUsers(GV, GV->getInitializer(), Context); 1741 1742 // If the global is dead now, just nuke it. 1743 if (GV->use_empty()) { 1744 DOUT << " *** Marking constant allowed us to simplify " 1745 << "all users and delete global!\n"; 1746 GV->eraseFromParent(); 1747 ++NumDeleted; 1748 } 1749 1750 ++NumMarked; 1751 return true; 1752 } else if (!GV->getInitializer()->getType()->isSingleValueType()) { 1753 if (GlobalVariable *FirstNewGV = SRAGlobal(GV, 1754 getAnalysis<TargetData>(), 1755 Context)) { 1756 GVI = FirstNewGV; // Don't skip the newly produced globals! 1757 return true; 1758 } 1759 } else if (GS.StoredType == GlobalStatus::isStoredOnce) { 1760 // If the initial value for the global was an undef value, and if only 1761 // one other value was stored into it, we can just change the 1762 // initializer to be the stored value, then delete all stores to the 1763 // global. This allows us to mark it constant. 1764 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) 1765 if (isa<UndefValue>(GV->getInitializer())) { 1766 // Change the initial value here. 1767 GV->setInitializer(SOVConstant); 1768 1769 // Clean up any obviously simplifiable users now. 1770 CleanupConstantGlobalUsers(GV, GV->getInitializer(), Context); 1771 1772 if (GV->use_empty()) { 1773 DOUT << " *** Substituting initializer allowed us to " 1774 << "simplify all users and delete global!\n"; 1775 GV->eraseFromParent(); 1776 ++NumDeleted; 1777 } else { 1778 GVI = GV; 1779 } 1780 ++NumSubstitute; 1781 return true; 1782 } 1783 1784 // Try to optimize globals based on the knowledge that only one value 1785 // (besides its initializer) is ever stored to the global. 1786 if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GVI, 1787 getAnalysis<TargetData>(), Context)) 1788 return true; 1789 1790 // Otherwise, if the global was not a boolean, we can shrink it to be a 1791 // boolean. 1792 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) 1793 if (TryToShrinkGlobalToBoolean(GV, SOVConstant, Context)) { 1794 ++NumShrunkToBool; 1795 return true; 1796 } 1797 } 1798 } 1799 return false; 1800} 1801 1802/// ChangeCalleesToFastCall - Walk all of the direct calls of the specified 1803/// function, changing them to FastCC. 1804static void ChangeCalleesToFastCall(Function *F) { 1805 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){ 1806 CallSite User(cast<Instruction>(*UI)); 1807 User.setCallingConv(CallingConv::Fast); 1808 } 1809} 1810 1811static AttrListPtr StripNest(const AttrListPtr &Attrs) { 1812 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { 1813 if ((Attrs.getSlot(i).Attrs & Attribute::Nest) == 0) 1814 continue; 1815 1816 // There can be only one. 1817 return Attrs.removeAttr(Attrs.getSlot(i).Index, Attribute::Nest); 1818 } 1819 1820 return Attrs; 1821} 1822 1823static void RemoveNestAttribute(Function *F) { 1824 F->setAttributes(StripNest(F->getAttributes())); 1825 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){ 1826 CallSite User(cast<Instruction>(*UI)); 1827 User.setAttributes(StripNest(User.getAttributes())); 1828 } 1829} 1830 1831bool GlobalOpt::OptimizeFunctions(Module &M) { 1832 bool Changed = false; 1833 // Optimize functions. 1834 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) { 1835 Function *F = FI++; 1836 // Functions without names cannot be referenced outside this module. 1837 if (!F->hasName() && !F->isDeclaration()) 1838 F->setLinkage(GlobalValue::InternalLinkage); 1839 F->removeDeadConstantUsers(); 1840 if (F->use_empty() && (F->hasLocalLinkage() || 1841 F->hasLinkOnceLinkage())) { 1842 M.getFunctionList().erase(F); 1843 Changed = true; 1844 ++NumFnDeleted; 1845 } else if (F->hasLocalLinkage()) { 1846 if (F->getCallingConv() == CallingConv::C && !F->isVarArg() && 1847 !F->hasAddressTaken()) { 1848 // If this function has C calling conventions, is not a varargs 1849 // function, and is only called directly, promote it to use the Fast 1850 // calling convention. 1851 F->setCallingConv(CallingConv::Fast); 1852 ChangeCalleesToFastCall(F); 1853 ++NumFastCallFns; 1854 Changed = true; 1855 } 1856 1857 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) && 1858 !F->hasAddressTaken()) { 1859 // The function is not used by a trampoline intrinsic, so it is safe 1860 // to remove the 'nest' attribute. 1861 RemoveNestAttribute(F); 1862 ++NumNestRemoved; 1863 Changed = true; 1864 } 1865 } 1866 } 1867 return Changed; 1868} 1869 1870bool GlobalOpt::OptimizeGlobalVars(Module &M) { 1871 bool Changed = false; 1872 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end(); 1873 GVI != E; ) { 1874 GlobalVariable *GV = GVI++; 1875 // Global variables without names cannot be referenced outside this module. 1876 if (!GV->hasName() && !GV->isDeclaration()) 1877 GV->setLinkage(GlobalValue::InternalLinkage); 1878 if (!GV->isConstant() && GV->hasLocalLinkage() && 1879 GV->hasInitializer()) 1880 Changed |= ProcessInternalGlobal(GV, GVI); 1881 } 1882 return Changed; 1883} 1884 1885/// FindGlobalCtors - Find the llvm.globalctors list, verifying that all 1886/// initializers have an init priority of 65535. 1887GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) { 1888 for (Module::global_iterator I = M.global_begin(), E = M.global_end(); 1889 I != E; ++I) 1890 if (I->getName() == "llvm.global_ctors") { 1891 // Found it, verify it's an array of { int, void()* }. 1892 const ArrayType *ATy =dyn_cast<ArrayType>(I->getType()->getElementType()); 1893 if (!ATy) return 0; 1894 const StructType *STy = dyn_cast<StructType>(ATy->getElementType()); 1895 if (!STy || STy->getNumElements() != 2 || 1896 STy->getElementType(0) != Type::Int32Ty) return 0; 1897 const PointerType *PFTy = dyn_cast<PointerType>(STy->getElementType(1)); 1898 if (!PFTy) return 0; 1899 const FunctionType *FTy = dyn_cast<FunctionType>(PFTy->getElementType()); 1900 if (!FTy || FTy->getReturnType() != Type::VoidTy || FTy->isVarArg() || 1901 FTy->getNumParams() != 0) 1902 return 0; 1903 1904 // Verify that the initializer is simple enough for us to handle. 1905 if (!I->hasInitializer()) return 0; 1906 ConstantArray *CA = dyn_cast<ConstantArray>(I->getInitializer()); 1907 if (!CA) return 0; 1908 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) 1909 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(*i)) { 1910 if (isa<ConstantPointerNull>(CS->getOperand(1))) 1911 continue; 1912 1913 // Must have a function or null ptr. 1914 if (!isa<Function>(CS->getOperand(1))) 1915 return 0; 1916 1917 // Init priority must be standard. 1918 ConstantInt *CI = dyn_cast<ConstantInt>(CS->getOperand(0)); 1919 if (!CI || CI->getZExtValue() != 65535) 1920 return 0; 1921 } else { 1922 return 0; 1923 } 1924 1925 return I; 1926 } 1927 return 0; 1928} 1929 1930/// ParseGlobalCtors - Given a llvm.global_ctors list that we can understand, 1931/// return a list of the functions and null terminator as a vector. 1932static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) { 1933 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer()); 1934 std::vector<Function*> Result; 1935 Result.reserve(CA->getNumOperands()); 1936 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) { 1937 ConstantStruct *CS = cast<ConstantStruct>(*i); 1938 Result.push_back(dyn_cast<Function>(CS->getOperand(1))); 1939 } 1940 return Result; 1941} 1942 1943/// InstallGlobalCtors - Given a specified llvm.global_ctors list, install the 1944/// specified array, returning the new global to use. 1945static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL, 1946 const std::vector<Function*> &Ctors, 1947 LLVMContext *Context) { 1948 // If we made a change, reassemble the initializer list. 1949 std::vector<Constant*> CSVals; 1950 CSVals.push_back(Context->getConstantInt(Type::Int32Ty, 65535)); 1951 CSVals.push_back(0); 1952 1953 // Create the new init list. 1954 std::vector<Constant*> CAList; 1955 for (unsigned i = 0, e = Ctors.size(); i != e; ++i) { 1956 if (Ctors[i]) { 1957 CSVals[1] = Ctors[i]; 1958 } else { 1959 const Type *FTy = Context->getFunctionType(Type::VoidTy, false); 1960 const PointerType *PFTy = Context->getPointerTypeUnqual(FTy); 1961 CSVals[1] = Context->getNullValue(PFTy); 1962 CSVals[0] = Context->getConstantInt(Type::Int32Ty, 2147483647); 1963 } 1964 CAList.push_back(Context->getConstantStruct(CSVals)); 1965 } 1966 1967 // Create the array initializer. 1968 const Type *StructTy = 1969 cast<ArrayType>(GCL->getType()->getElementType())->getElementType(); 1970 Constant *CA = Context->getConstantArray(ArrayType::get(StructTy, 1971 CAList.size()), CAList); 1972 1973 // If we didn't change the number of elements, don't create a new GV. 1974 if (CA->getType() == GCL->getInitializer()->getType()) { 1975 GCL->setInitializer(CA); 1976 return GCL; 1977 } 1978 1979 // Create the new global and insert it next to the existing list. 1980 GlobalVariable *NGV = new GlobalVariable(*Context, CA->getType(), 1981 GCL->isConstant(), 1982 GCL->getLinkage(), CA, "", 1983 GCL->isThreadLocal()); 1984 GCL->getParent()->getGlobalList().insert(GCL, NGV); 1985 NGV->takeName(GCL); 1986 1987 // Nuke the old list, replacing any uses with the new one. 1988 if (!GCL->use_empty()) { 1989 Constant *V = NGV; 1990 if (V->getType() != GCL->getType()) 1991 V = Context->getConstantExprBitCast(V, GCL->getType()); 1992 GCL->replaceAllUsesWith(V); 1993 } 1994 GCL->eraseFromParent(); 1995 1996 if (Ctors.size()) 1997 return NGV; 1998 else 1999 return 0; 2000} 2001 2002 2003static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues, 2004 Value *V) { 2005 if (Constant *CV = dyn_cast<Constant>(V)) return CV; 2006 Constant *R = ComputedValues[V]; 2007 assert(R && "Reference to an uncomputed value!"); 2008 return R; 2009} 2010 2011/// isSimpleEnoughPointerToCommit - Return true if this constant is simple 2012/// enough for us to understand. In particular, if it is a cast of something, 2013/// we punt. We basically just support direct accesses to globals and GEP's of 2014/// globals. This should be kept up to date with CommitValueTo. 2015static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext *Context) { 2016 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) { 2017 if (!GV->hasExternalLinkage() && !GV->hasLocalLinkage()) 2018 return false; // do not allow weak/linkonce/dllimport/dllexport linkage. 2019 return !GV->isDeclaration(); // reject external globals. 2020 } 2021 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2022 // Handle a constantexpr gep. 2023 if (CE->getOpcode() == Instruction::GetElementPtr && 2024 isa<GlobalVariable>(CE->getOperand(0))) { 2025 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2026 if (!GV->hasExternalLinkage() && !GV->hasLocalLinkage()) 2027 return false; // do not allow weak/linkonce/dllimport/dllexport linkage. 2028 return GV->hasInitializer() && 2029 ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE, 2030 Context); 2031 } 2032 return false; 2033} 2034 2035/// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global 2036/// initializer. This returns 'Init' modified to reflect 'Val' stored into it. 2037/// At this point, the GEP operands of Addr [0, OpNo) have been stepped into. 2038static Constant *EvaluateStoreInto(Constant *Init, Constant *Val, 2039 ConstantExpr *Addr, unsigned OpNo, 2040 LLVMContext *Context) { 2041 // Base case of the recursion. 2042 if (OpNo == Addr->getNumOperands()) { 2043 assert(Val->getType() == Init->getType() && "Type mismatch!"); 2044 return Val; 2045 } 2046 2047 if (const StructType *STy = dyn_cast<StructType>(Init->getType())) { 2048 std::vector<Constant*> Elts; 2049 2050 // Break up the constant into its elements. 2051 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) { 2052 for (User::op_iterator i = CS->op_begin(), e = CS->op_end(); i != e; ++i) 2053 Elts.push_back(cast<Constant>(*i)); 2054 } else if (isa<ConstantAggregateZero>(Init)) { 2055 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 2056 Elts.push_back(Context->getNullValue(STy->getElementType(i))); 2057 } else if (isa<UndefValue>(Init)) { 2058 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 2059 Elts.push_back(Context->getUndef(STy->getElementType(i))); 2060 } else { 2061 llvm_unreachable("This code is out of sync with " 2062 " ConstantFoldLoadThroughGEPConstantExpr"); 2063 } 2064 2065 // Replace the element that we are supposed to. 2066 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo)); 2067 unsigned Idx = CU->getZExtValue(); 2068 assert(Idx < STy->getNumElements() && "Struct index out of range!"); 2069 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1, Context); 2070 2071 // Return the modified struct. 2072 return Context->getConstantStruct(&Elts[0], Elts.size(), STy->isPacked()); 2073 } else { 2074 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo)); 2075 const ArrayType *ATy = cast<ArrayType>(Init->getType()); 2076 2077 // Break up the array into elements. 2078 std::vector<Constant*> Elts; 2079 if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) { 2080 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) 2081 Elts.push_back(cast<Constant>(*i)); 2082 } else if (isa<ConstantAggregateZero>(Init)) { 2083 Constant *Elt = Context->getNullValue(ATy->getElementType()); 2084 Elts.assign(ATy->getNumElements(), Elt); 2085 } else if (isa<UndefValue>(Init)) { 2086 Constant *Elt = Context->getUndef(ATy->getElementType()); 2087 Elts.assign(ATy->getNumElements(), Elt); 2088 } else { 2089 llvm_unreachable("This code is out of sync with " 2090 " ConstantFoldLoadThroughGEPConstantExpr"); 2091 } 2092 2093 assert(CI->getZExtValue() < ATy->getNumElements()); 2094 Elts[CI->getZExtValue()] = 2095 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1, Context); 2096 return Context->getConstantArray(ATy, Elts); 2097 } 2098} 2099 2100/// CommitValueTo - We have decided that Addr (which satisfies the predicate 2101/// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen. 2102static void CommitValueTo(Constant *Val, Constant *Addr, 2103 LLVMContext *Context) { 2104 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) { 2105 assert(GV->hasInitializer()); 2106 GV->setInitializer(Val); 2107 return; 2108 } 2109 2110 ConstantExpr *CE = cast<ConstantExpr>(Addr); 2111 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2112 2113 Constant *Init = GV->getInitializer(); 2114 Init = EvaluateStoreInto(Init, Val, CE, 2, Context); 2115 GV->setInitializer(Init); 2116} 2117 2118/// ComputeLoadResult - Return the value that would be computed by a load from 2119/// P after the stores reflected by 'memory' have been performed. If we can't 2120/// decide, return null. 2121static Constant *ComputeLoadResult(Constant *P, 2122 const DenseMap<Constant*, Constant*> &Memory, 2123 LLVMContext *Context) { 2124 // If this memory location has been recently stored, use the stored value: it 2125 // is the most up-to-date. 2126 DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P); 2127 if (I != Memory.end()) return I->second; 2128 2129 // Access it. 2130 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) { 2131 if (GV->hasInitializer()) 2132 return GV->getInitializer(); 2133 return 0; 2134 } 2135 2136 // Handle a constantexpr getelementptr. 2137 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P)) 2138 if (CE->getOpcode() == Instruction::GetElementPtr && 2139 isa<GlobalVariable>(CE->getOperand(0))) { 2140 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2141 if (GV->hasInitializer()) 2142 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE, 2143 Context); 2144 } 2145 2146 return 0; // don't know how to evaluate. 2147} 2148 2149/// EvaluateFunction - Evaluate a call to function F, returning true if 2150/// successful, false if we can't evaluate it. ActualArgs contains the formal 2151/// arguments for the function. 2152static bool EvaluateFunction(Function *F, Constant *&RetVal, 2153 const std::vector<Constant*> &ActualArgs, 2154 std::vector<Function*> &CallStack, 2155 DenseMap<Constant*, Constant*> &MutatedMemory, 2156 std::vector<GlobalVariable*> &AllocaTmps) { 2157 // Check to see if this function is already executing (recursion). If so, 2158 // bail out. TODO: we might want to accept limited recursion. 2159 if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end()) 2160 return false; 2161 2162 LLVMContext *Context = F->getContext(); 2163 2164 CallStack.push_back(F); 2165 2166 /// Values - As we compute SSA register values, we store their contents here. 2167 DenseMap<Value*, Constant*> Values; 2168 2169 // Initialize arguments to the incoming values specified. 2170 unsigned ArgNo = 0; 2171 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E; 2172 ++AI, ++ArgNo) 2173 Values[AI] = ActualArgs[ArgNo]; 2174 2175 /// ExecutedBlocks - We only handle non-looping, non-recursive code. As such, 2176 /// we can only evaluate any one basic block at most once. This set keeps 2177 /// track of what we have executed so we can detect recursive cases etc. 2178 SmallPtrSet<BasicBlock*, 32> ExecutedBlocks; 2179 2180 // CurInst - The current instruction we're evaluating. 2181 BasicBlock::iterator CurInst = F->begin()->begin(); 2182 2183 // This is the main evaluation loop. 2184 while (1) { 2185 Constant *InstResult = 0; 2186 2187 if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) { 2188 if (SI->isVolatile()) return false; // no volatile accesses. 2189 Constant *Ptr = getVal(Values, SI->getOperand(1)); 2190 if (!isSimpleEnoughPointerToCommit(Ptr, Context)) 2191 // If this is too complex for us to commit, reject it. 2192 return false; 2193 Constant *Val = getVal(Values, SI->getOperand(0)); 2194 MutatedMemory[Ptr] = Val; 2195 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) { 2196 InstResult = Context->getConstantExpr(BO->getOpcode(), 2197 getVal(Values, BO->getOperand(0)), 2198 getVal(Values, BO->getOperand(1))); 2199 } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) { 2200 InstResult = Context->getConstantExprCompare(CI->getPredicate(), 2201 getVal(Values, CI->getOperand(0)), 2202 getVal(Values, CI->getOperand(1))); 2203 } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) { 2204 InstResult = Context->getConstantExprCast(CI->getOpcode(), 2205 getVal(Values, CI->getOperand(0)), 2206 CI->getType()); 2207 } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) { 2208 InstResult = 2209 Context->getConstantExprSelect(getVal(Values, SI->getOperand(0)), 2210 getVal(Values, SI->getOperand(1)), 2211 getVal(Values, SI->getOperand(2))); 2212 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) { 2213 Constant *P = getVal(Values, GEP->getOperand(0)); 2214 SmallVector<Constant*, 8> GEPOps; 2215 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); 2216 i != e; ++i) 2217 GEPOps.push_back(getVal(Values, *i)); 2218 InstResult = 2219 Context->getConstantExprGetElementPtr(P, &GEPOps[0], GEPOps.size()); 2220 } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) { 2221 if (LI->isVolatile()) return false; // no volatile accesses. 2222 InstResult = ComputeLoadResult(getVal(Values, LI->getOperand(0)), 2223 MutatedMemory, Context); 2224 if (InstResult == 0) return false; // Could not evaluate load. 2225 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) { 2226 if (AI->isArrayAllocation()) return false; // Cannot handle array allocs. 2227 const Type *Ty = AI->getType()->getElementType(); 2228 AllocaTmps.push_back(new GlobalVariable(*Context, Ty, false, 2229 GlobalValue::InternalLinkage, 2230 Context->getUndef(Ty), 2231 AI->getName())); 2232 InstResult = AllocaTmps.back(); 2233 } else if (CallInst *CI = dyn_cast<CallInst>(CurInst)) { 2234 2235 // Debug info can safely be ignored here. 2236 if (isa<DbgInfoIntrinsic>(CI)) { 2237 ++CurInst; 2238 continue; 2239 } 2240 2241 // Cannot handle inline asm. 2242 if (isa<InlineAsm>(CI->getOperand(0))) return false; 2243 2244 // Resolve function pointers. 2245 Function *Callee = dyn_cast<Function>(getVal(Values, CI->getOperand(0))); 2246 if (!Callee) return false; // Cannot resolve. 2247 2248 std::vector<Constant*> Formals; 2249 for (User::op_iterator i = CI->op_begin() + 1, e = CI->op_end(); 2250 i != e; ++i) 2251 Formals.push_back(getVal(Values, *i)); 2252 2253 if (Callee->isDeclaration()) { 2254 // If this is a function we can constant fold, do it. 2255 if (Constant *C = ConstantFoldCall(Callee, &Formals[0], 2256 Formals.size())) { 2257 InstResult = C; 2258 } else { 2259 return false; 2260 } 2261 } else { 2262 if (Callee->getFunctionType()->isVarArg()) 2263 return false; 2264 2265 Constant *RetVal; 2266 // Execute the call, if successful, use the return value. 2267 if (!EvaluateFunction(Callee, RetVal, Formals, CallStack, 2268 MutatedMemory, AllocaTmps)) 2269 return false; 2270 InstResult = RetVal; 2271 } 2272 } else if (isa<TerminatorInst>(CurInst)) { 2273 BasicBlock *NewBB = 0; 2274 if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) { 2275 if (BI->isUnconditional()) { 2276 NewBB = BI->getSuccessor(0); 2277 } else { 2278 ConstantInt *Cond = 2279 dyn_cast<ConstantInt>(getVal(Values, BI->getCondition())); 2280 if (!Cond) return false; // Cannot determine. 2281 2282 NewBB = BI->getSuccessor(!Cond->getZExtValue()); 2283 } 2284 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) { 2285 ConstantInt *Val = 2286 dyn_cast<ConstantInt>(getVal(Values, SI->getCondition())); 2287 if (!Val) return false; // Cannot determine. 2288 NewBB = SI->getSuccessor(SI->findCaseValue(Val)); 2289 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(CurInst)) { 2290 if (RI->getNumOperands()) 2291 RetVal = getVal(Values, RI->getOperand(0)); 2292 2293 CallStack.pop_back(); // return from fn. 2294 return true; // We succeeded at evaluating this ctor! 2295 } else { 2296 // invoke, unwind, unreachable. 2297 return false; // Cannot handle this terminator. 2298 } 2299 2300 // Okay, we succeeded in evaluating this control flow. See if we have 2301 // executed the new block before. If so, we have a looping function, 2302 // which we cannot evaluate in reasonable time. 2303 if (!ExecutedBlocks.insert(NewBB)) 2304 return false; // looped! 2305 2306 // Okay, we have never been in this block before. Check to see if there 2307 // are any PHI nodes. If so, evaluate them with information about where 2308 // we came from. 2309 BasicBlock *OldBB = CurInst->getParent(); 2310 CurInst = NewBB->begin(); 2311 PHINode *PN; 2312 for (; (PN = dyn_cast<PHINode>(CurInst)); ++CurInst) 2313 Values[PN] = getVal(Values, PN->getIncomingValueForBlock(OldBB)); 2314 2315 // Do NOT increment CurInst. We know that the terminator had no value. 2316 continue; 2317 } else { 2318 // Did not know how to evaluate this! 2319 return false; 2320 } 2321 2322 if (!CurInst->use_empty()) 2323 Values[CurInst] = InstResult; 2324 2325 // Advance program counter. 2326 ++CurInst; 2327 } 2328} 2329 2330/// EvaluateStaticConstructor - Evaluate static constructors in the function, if 2331/// we can. Return true if we can, false otherwise. 2332static bool EvaluateStaticConstructor(Function *F) { 2333 /// MutatedMemory - For each store we execute, we update this map. Loads 2334 /// check this to get the most up-to-date value. If evaluation is successful, 2335 /// this state is committed to the process. 2336 DenseMap<Constant*, Constant*> MutatedMemory; 2337 2338 /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable 2339 /// to represent its body. This vector is needed so we can delete the 2340 /// temporary globals when we are done. 2341 std::vector<GlobalVariable*> AllocaTmps; 2342 2343 /// CallStack - This is used to detect recursion. In pathological situations 2344 /// we could hit exponential behavior, but at least there is nothing 2345 /// unbounded. 2346 std::vector<Function*> CallStack; 2347 2348 // Call the function. 2349 Constant *RetValDummy; 2350 bool EvalSuccess = EvaluateFunction(F, RetValDummy, std::vector<Constant*>(), 2351 CallStack, MutatedMemory, AllocaTmps); 2352 if (EvalSuccess) { 2353 // We succeeded at evaluation: commit the result. 2354 DOUT << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" 2355 << F->getName() << "' to " << MutatedMemory.size() 2356 << " stores.\n"; 2357 for (DenseMap<Constant*, Constant*>::iterator I = MutatedMemory.begin(), 2358 E = MutatedMemory.end(); I != E; ++I) 2359 CommitValueTo(I->second, I->first, F->getContext()); 2360 } 2361 2362 // At this point, we are done interpreting. If we created any 'alloca' 2363 // temporaries, release them now. 2364 while (!AllocaTmps.empty()) { 2365 GlobalVariable *Tmp = AllocaTmps.back(); 2366 AllocaTmps.pop_back(); 2367 2368 // If there are still users of the alloca, the program is doing something 2369 // silly, e.g. storing the address of the alloca somewhere and using it 2370 // later. Since this is undefined, we'll just make it be null. 2371 if (!Tmp->use_empty()) 2372 Tmp->replaceAllUsesWith(F->getContext()->getNullValue(Tmp->getType())); 2373 delete Tmp; 2374 } 2375 2376 return EvalSuccess; 2377} 2378 2379 2380 2381/// OptimizeGlobalCtorsList - Simplify and evaluation global ctors if possible. 2382/// Return true if anything changed. 2383bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) { 2384 std::vector<Function*> Ctors = ParseGlobalCtors(GCL); 2385 bool MadeChange = false; 2386 if (Ctors.empty()) return false; 2387 2388 // Loop over global ctors, optimizing them when we can. 2389 for (unsigned i = 0; i != Ctors.size(); ++i) { 2390 Function *F = Ctors[i]; 2391 // Found a null terminator in the middle of the list, prune off the rest of 2392 // the list. 2393 if (F == 0) { 2394 if (i != Ctors.size()-1) { 2395 Ctors.resize(i+1); 2396 MadeChange = true; 2397 } 2398 break; 2399 } 2400 2401 // We cannot simplify external ctor functions. 2402 if (F->empty()) continue; 2403 2404 // If we can evaluate the ctor at compile time, do. 2405 if (EvaluateStaticConstructor(F)) { 2406 Ctors.erase(Ctors.begin()+i); 2407 MadeChange = true; 2408 --i; 2409 ++NumCtorsEvaluated; 2410 continue; 2411 } 2412 } 2413 2414 if (!MadeChange) return false; 2415 2416 GCL = InstallGlobalCtors(GCL, Ctors, Context); 2417 return true; 2418} 2419 2420bool GlobalOpt::OptimizeGlobalAliases(Module &M) { 2421 bool Changed = false; 2422 2423 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); 2424 I != E;) { 2425 Module::alias_iterator J = I++; 2426 // Aliases without names cannot be referenced outside this module. 2427 if (!J->hasName() && !J->isDeclaration()) 2428 J->setLinkage(GlobalValue::InternalLinkage); 2429 // If the aliasee may change at link time, nothing can be done - bail out. 2430 if (J->mayBeOverridden()) 2431 continue; 2432 2433 Constant *Aliasee = J->getAliasee(); 2434 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts()); 2435 Target->removeDeadConstantUsers(); 2436 bool hasOneUse = Target->hasOneUse() && Aliasee->hasOneUse(); 2437 2438 // Make all users of the alias use the aliasee instead. 2439 if (!J->use_empty()) { 2440 J->replaceAllUsesWith(Aliasee); 2441 ++NumAliasesResolved; 2442 Changed = true; 2443 } 2444 2445 // If the aliasee has internal linkage, give it the name and linkage 2446 // of the alias, and delete the alias. This turns: 2447 // define internal ... @f(...) 2448 // @a = alias ... @f 2449 // into: 2450 // define ... @a(...) 2451 if (!Target->hasLocalLinkage()) 2452 continue; 2453 2454 // The transform is only useful if the alias does not have internal linkage. 2455 if (J->hasLocalLinkage()) 2456 continue; 2457 2458 // Do not perform the transform if multiple aliases potentially target the 2459 // aliasee. This check also ensures that it is safe to replace the section 2460 // and other attributes of the aliasee with those of the alias. 2461 if (!hasOneUse) 2462 continue; 2463 2464 // Give the aliasee the name, linkage and other attributes of the alias. 2465 Target->takeName(J); 2466 Target->setLinkage(J->getLinkage()); 2467 Target->GlobalValue::copyAttributesFrom(J); 2468 2469 // Delete the alias. 2470 M.getAliasList().erase(J); 2471 ++NumAliasesRemoved; 2472 Changed = true; 2473 } 2474 2475 return Changed; 2476} 2477 2478bool GlobalOpt::runOnModule(Module &M) { 2479 bool Changed = false; 2480 2481 // Try to find the llvm.globalctors list. 2482 GlobalVariable *GlobalCtors = FindGlobalCtors(M); 2483 2484 bool LocalChange = true; 2485 while (LocalChange) { 2486 LocalChange = false; 2487 2488 // Delete functions that are trivially dead, ccc -> fastcc 2489 LocalChange |= OptimizeFunctions(M); 2490 2491 // Optimize global_ctors list. 2492 if (GlobalCtors) 2493 LocalChange |= OptimizeGlobalCtorsList(GlobalCtors); 2494 2495 // Optimize non-address-taken globals. 2496 LocalChange |= OptimizeGlobalVars(M); 2497 2498 // Resolve aliases, when possible. 2499 LocalChange |= OptimizeGlobalAliases(M); 2500 Changed |= LocalChange; 2501 } 2502 2503 // TODO: Move all global ctors functions to the end of the module for code 2504 // layout. 2505 2506 return Changed; 2507} 2508