GlobalOpt.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "globalopt"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumMarked      , "Number of globals marked constant");
STATISTIC(NumUnnamed     , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA         , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA     , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute  , "Number of globals with initializers stored into them");
STATISTIC(NumDeleted     , "Number of globals deleted");
STATISTIC(NumFnDeleted   , "Number of functions deleted");
STATISTIC(NumGlobUses    , "Number of global uses devirtualized");
STATISTIC(NumLocalized   , "Number of globals localized");
STATISTIC(NumShrunkToBool, "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");

namespace {
  struct GlobalOpt : public ModulePass {
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<TargetLibraryInfo>();
    }
    static char ID; // Pass identification, replacement for typeid
    GlobalOpt() : ModulePass(ID) {
      initializeGlobalOptPass(*PassRegistry::getPassRegistry());
    }

    bool runOnModule(Module &M) override;

  private:
    GlobalVariable *FindGlobalCtors(Module &M);
    bool OptimizeFunctions(Module &M);
    bool OptimizeGlobalVars(Module &M);
    bool OptimizeGlobalAliases(Module &M);
    bool OptimizeGlobalCtorsList(GlobalVariable *&GCL);
    bool ProcessGlobal(GlobalVariable *GV, Module::global_iterator &GVI);
    bool ProcessInternalGlobal(GlobalVariable *GV, Module::global_iterator &GVI,
                               const GlobalStatus &GS);
    bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);

    const DataLayout *DL;
    TargetLibraryInfo *TLI;
  };
}

char GlobalOpt::ID = 0;
INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt",
                      "Global Variable Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(GlobalOpt, "globalopt",
                    "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }

/// isLeakCheckerRoot - Is this global variable possibly used by a leak checker
/// as a root?  If so, we might not really want to eliminate the stores to it.
static bool isLeakCheckerRoot(GlobalVariable *GV) {
  // A global variable is a root if it is a pointer, or could plausibly contain
  // a pointer.  There are two challenges; one is that we could have a struct
  // that has an inner member which is a pointer.  We recurse through the type
  // to detect these (up to a point).  The other is that we may actually be a
  // union of a pointer and another type, and so our LLVM type is an integer
  // which gets converted into a pointer, or our type is an [i8 x #] with a
  // pointer potentially contained here.

  if (GV->hasPrivateLinkage())
    return false;

  SmallVector<Type *, 4> Types;
  Types.push_back(cast<PointerType>(GV->getType())->getElementType());

  unsigned Limit = 20;
  do {
    Type *Ty = Types.pop_back_val();
    switch (Ty->getTypeID()) {
      default: break;
      case Type::PointerTyID: return true;
      case Type::ArrayTyID:
      case Type::VectorTyID: {
        SequentialType *STy = cast<SequentialType>(Ty);
        Types.push_back(STy->getElementType());
        break;
      }
      case Type::StructTyID: {
        StructType *STy = cast<StructType>(Ty);
        if (STy->isOpaque()) return true;
        for (StructType::element_iterator I = STy->element_begin(),
             E = STy->element_end(); I != E; ++I) {
          Type *InnerTy = *I;
          if (isa<PointerType>(InnerTy)) return true;
          if (isa<CompositeType>(InnerTy))
            Types.push_back(InnerTy);
        }
        break;
      }
    }
    if (--Limit == 0) return true;
  } while (!Types.empty());
  return false;
}

/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// store.
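// A minimal illustration (the IR below is a made-up example, not taken from a
// test case).  A chain such as
//   %buf = call i8* @malloc(i64 16)
//   %p   = getelementptr inbounds i8* %buf, i64 8
//   store i8* %p, i8** @G
// is removable when each intermediate value has exactly one use and no side
// effects: the store goes first, then the GEP, then the allocation itself.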
static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) {
  do {
    if (isa<Constant>(V))
      return true;
    if (!V->hasOneUse())
      return false;
    if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
        isa<GlobalValue>(V))
      return false;
    if (isAllocationFn(V, TLI))
      return true;

    Instruction *I = cast<Instruction>(V);
    if (I->mayHaveSideEffects())
      return false;
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      if (!GEP->hasAllConstantIndices())
        return false;
    } else if (I->getNumOperands() != 1) {
      return false;
    }

    V = I->getOperand(0);
  } while (1);
}

/// CleanupPointerRootUsers - This GV is a pointer root.  Loop over all users
/// of the global and clean up any that obviously don't assign the global a
/// value that isn't dynamically allocated.
///
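// Sketch of the intended effect (illustrative IR, not from this file): for a
// leak-checker root such as
//   @Root = global i8* null
// a store of a constant, e.g. "store i8* null, i8** @Root", can be deleted
// outright, while a store of a dynamically allocated pointer is only removed
// together with its whole (otherwise unused) computation chain.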
static bool CleanupPointerRootUsers(GlobalVariable *GV,
                                    const TargetLibraryInfo *TLI) {
  // A brief explanation of leak checkers.  The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time.  The common strategy for leak checkers is to whitelist the
  // memory pointed to by globals at exit.  This is popular because it also
  // solves another problem where the main thread of a C++ program may shut down
  // before other threads that are still expecting to use those globals.  To
  // handle that case, we expect the program may create a singleton and never
  // destroy it.

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
       UI != E;) {
    User *U = *UI++;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV, TLI);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first, TLI)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I, TLI))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (1);
      I->eraseFromParent();
    }
  }

  return Changed;
}

/// CleanupConstantGlobalUsers - We just marked GV constant.  Loop over all
/// users of the global, cleaning up the obvious ones.  This is largely just a
/// quick scan over the use list to clean up the easy and obvious cruft.  This
/// returns true if it made a change.
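// For instance (an illustrative sketch, not code from this pass): once
//   @G = internal constant i32 42
// every "%v = load i32* @G" can be folded to the constant 42, stores into @G
// must be unreachable and are erased, and memset/memcpy uses whose destination
// is rooted at @G are dropped likewise.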
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
                                       const DataLayout *DL,
                                       TargetLibraryInfo *TLI) {
  bool Changed = false;
  // Note that we need to use a weak value handle for the worklist items. When
  // we delete a constant array, we may also be holding a pointer to one of its
  // elements (or an element of one of its elements if we're dealing with an
  // array of arrays) in the worklist.
  SmallVector<WeakVH, 8> WorkList(V->user_begin(), V->user_end());
  while (!WorkList.empty()) {
    Value *UV = WorkList.pop_back_val();
    if (!UV)
      continue;

    User *U = cast<User>(UV);

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = 0;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
      } else if ((CE->getOpcode() == Instruction::BitCast &&
                  CE->getType()->isPointerTy()) ||
                 CE->getOpcode() == Instruction::AddrSpaceCast) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, 0, DL, TLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = 0;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE =
          dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, DL, TLI));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getType()->getElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        CleanupConstantGlobalUsers(V, Init, DL, TLI);
        return true;
      }
    }
  }
  return Changed;
}

/// isSafeSROAElementUse - Return true if the specified instruction is a safe
/// user of a derived expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return isSafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.
  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
  if (GEPI == 0) return false;

  if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
      !cast<Constant>(GEPI->getOperand(1))->isNullValue())
    return false;

  for (User *U : GEPI->users())
    if (!isSafeSROAElementUse(U))
      return false;
  return true;
}


/// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value.
/// Look at it and its uses and decide whether it is safe to SROA this global.
///
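// Put differently (illustrative example only): the analysis accepts users of
// the shape
//   getelementptr @G, 0, <constant>, ...
// e.g. "getelementptr @G, i32 0, i32 1" selecting the second struct field;
// a non-constant or out-of-range element index disqualifies the global.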
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // value of C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
    uint64_t NumElements = AT->getNumElements();
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that index falls within the array.  If not,
    // something funny is going on, so we won't do the optimization.
    //
    if (Idx->getZExtValue() >= NumElements)
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants.  In particular, consider:
    // A[0][i].  We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI;  // Skip array index.
         GEPI != E;
         ++GEPI) {
      uint64_t NumElements;
      if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
        NumElements = SubArrayTy->getNumElements();
      else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
        NumElements = SubVectorTy->getNumElements();
      else {
        assert((*GEPI)->isStructTy() &&
               "Indexed GEP type is not array, vector, or struct!");
        continue;
      }

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
        return false;
    }
  }

  for (User *UU : U->users())
    if (!isSafeSROAElementUse(UU))
      return false;

  return true;
}

/// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it
/// is safe for us to perform this transformation.
///
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (User *U : GV->users())
    if (!IsUserOfGlobalSafeForSRA(U, GV))
      return false;

  return true;
}


/// SRAGlobal - Perform scalar replacement of aggregates on the specified global
/// variable.  This opens the door for other optimizations by exposing the
/// behavior of the program in a more fine-grained way.  We have determined that
/// this transformation is safe already.  We return the first global variable we
/// insert so that the caller can reprocess it.
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return 0;

  assert(GV->hasLocalLinkage() && !GV->isConstant());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();

  std::vector<GlobalVariable*> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = DL.getABITypeAlignment(GV->getType());

  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    NewGlobals.reserve(STy->getNumElements());
    const StructLayout &Layout = *DL.getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);
    }
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = 0;
    if (ArrayType *ATy = dyn_cast<ArrayType>(STy))
      NumElements = ATy->getNumElements();
    else
      NumElements = cast<VectorType>(STy)->getNumElements();

    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return 0; // It's not worth it.
    NewGlobals.reserve(NumElements);

    uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType());
    unsigned EltAlign = DL.getABITypeAlignment(STy->getElementType());
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
    }
  }

  if (NewGlobals.empty())
    return 0;

  DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV);

  Constant *NullInt = Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps
  // with smaller constantexpr geps or direct references.
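  // E.g. (an illustrative sketch): after @G = global { i32, [4 x float] } is
  // split into @G.0 and @G.1,
  //   getelementptr @G, 0, 1, %i   is rewritten to   getelementptr @G.1, 0, %i
  // and a GEP that selects all of field 0 becomes a direct reference to @G.0.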
  while (!GV->use_empty()) {
    User *GEP = GV->user_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore operand 1, which has to be zero or else the program is quite
    // broken (undefined).  Get operand 2, which is the structure or array
    // index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr), Idxs);
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(NewPtr, Idxs,
                                           GEPI->getName()+"."+Twine(Val),GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array deleting any globals that are obviously
  // dead.  This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0;
}

/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
/// value will trap if the value is dynamically null.  PHIs keeps track of any
/// phi nodes we've seen to avoid reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                         SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (const User *U : V->users())
    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(U->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }

  return true;
}

/// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads
/// from GV will trap if the loaded value is null.  Note that this also permits
/// comparisons of the loaded value against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  return true;
}

static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer!  Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate UI!
          UI = V->user_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                                ConstantExpr::getCast(CI->getOpcode(),
                                                      NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(GEPI,
                          ConstantExpr::getGetElementPtr(NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}


/// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null
/// value stored into it.  If there are uses of the loaded value that would trap
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null value; optimize away the load.
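// Illustrative effect (example IR, not from this file): if the only non-null
// value ever stored to @FnPtr is @impl, then
//   %f = load void()** @FnPtr
//   call void %f()
// can be rewritten into the direct "call void @impl()"; a null %f would have
// trapped anyway, so the replacement is safe on every reachable path.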
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
                                            const DataLayout *DL,
                                            TargetLibraryInfo *TLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of the stored value.
  for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end();
       GUI != E;) {
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
              isa<BitCastInst>(GlobalUser) ||
              isa<GetElementPtrInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV);
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      Changed |= CleanupPointerRootUsers(GV, TLI);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, 0, DL, TLI);
    }
    if (GV->use_empty()) {
      DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}

/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
static void ConstantPropUsersOf(Value *V, const DataLayout *DL,
                                TargetLibraryInfo *TLI) {
  for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        I->eraseFromParent();
      }
}

/// OptimizeGlobalAddressOfMalloc - This function takes the specified global
/// variable, and transforms the program as if it always contained the result of
/// the specified malloc.  Because it is always the result of the specified
/// malloc, there is no reason to actually DO the malloc.  Instead, turn the
/// malloc into a global, and turn any loads of GV into uses of the new global.
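// A before/after sketch (illustrative IR only):
//   @G = internal global i32* null         ; the only store is the malloc
//   store i32* (malloc result), i32** @G
// becomes
//   @G.body = internal global i32 undef
// with every "load i32** @G" use turned into a use of @G.body, plus an
// @G.init boolean if anything compared the old pointer against null.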
static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
                                                     CallInst *CI,
                                                     Type *AllocTy,
                                                     ConstantInt *NElements,
                                                     const DataLayout *DL,
                                                     TargetLibraryInfo *TLI) {
  DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable.  The contents of the malloc'd memory are
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
                                             GlobalType, false,
                                             GlobalValue::InternalLinkage,
                                             UndefValue::get(GlobalType),
                                             GV->getName()+".body",
                                             GV,
                                             GV->getThreadLocalMode());

  // If there are bitcast users of the malloc (which is typical, usually we have
  // a malloc + bitcast) then replace them with uses of the new global.  Update
  // other users to use the global as well.
  BitCastInst *TheBC = 0;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->user_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (TheBC == 0)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getType()->getElementType())
    RepValue = ConstantExpr::getBitCast(RepValue,
                                        GV->getType()->getElementType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
    new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                       GlobalValue::InternalLinkage,
                       ConstantInt::getFalse(GV->getContext()),
                       GV->getName()+".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
                    SI->getOrdering(), SI->getSynchScope(), SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->user_back());
    while (!LI->use_empty()) {
      Use &LoadUse = *LI->use_begin();
      ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
      if (!ICI) {
        LoadUse = RepValue;
        continue;
      }

      // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
      Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
                               LI->getOrdering(), LI->getSynchScope(),
                               LI->isUnordered() ? (Instruction*)ICI : LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
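  // At this point (illustratively) every "icmp eq (load @G), null" has become
  // "xor (load @G.init), true", ordered predicates were folded or rewritten
  // per the switch above, and InitBool records whether any compare actually
  // needed the boolean.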
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->user_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV, InitBool);

  // Now the GV is dead, nuke it and the malloc.
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV, DL, TLI);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, DL, TLI);

  return NewGV;
}

/// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking
/// to make sure that there are no complex uses of V.  We permit simple things
/// like dereferencing the pointer, but not storing through the address, unless
/// it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                                      const GlobalVariable *GV,
                                        SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (const User *U : V->users()) {
    const Instruction *Inst = cast<Instruction>(U);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok.  Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN))
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}
/// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV
/// somewhere.  Transform all uses of the allocation into loads from the
/// global and uses of the resultant pointer.  Further, delete the store into
/// GV.  This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->user_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}

/// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi
/// of a load) are simple enough to perform heap SRA on.  This permits GEP's
/// that index through the array and struct field, icmps of null, and PHIs.
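// Concretely (illustrative only), for "%p = load %struct.T** @G" the
// permitted uses are
//   icmp eq %struct.T* %p, null
//   getelementptr %struct.T* %p, i64 %i, i32 <field>
//   phi nodes whose transitive uses are themselves of these shapes
// and anything else rejects the global for heap SRA.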
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                         SmallPtrSet<const PHINode*, 32> &LoadUsingPHIs,
                         SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (const User *U : V->users()) {
    const Instruction *UI = cast<Instruction>(U);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      if (!LoadUsingPHIsPerLoad.insert(PN))
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN))
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}


/// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from
/// GV are simple enough to perform HeapSRA, return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform.  However, we don't know
  // that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or the stored value itself.
  for (SmallPtrSet<const PHINode*, 32>::const_iterator I = LoadUsingPHIs.begin()
       , E = LoadUsingPHIs.end(); I != E; ++I) {
    const PHINode *PN = *I;
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // UNDEF? NULL?

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}

static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                           InsertedScalarizedValues,
                                           PHIsToRewrite),
                          LI->getName()+".f"+Twine(FieldNo), LI);
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.
    StructType *ST = cast<StructType>(PN->getType()->getPointerElementType());

    PHINode *NewPN =
      PHINode::Create(PointerType::getUnqual(ST->getElementType(FieldNo)),
                      PN->getNumIncomingValues(),
                      PN->getName()+".f"+Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  } else {
    llvm_unreachable("Unknown usable value");
  }

  return FieldVals[FieldNo] = Result;
}

/// RewriteHeapSROALoadUser - Given a load instruction and a value derived from
/// the load, rewrite the derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                       std::vector<Value*>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}

/// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global.  Ptr
/// is a value loaded from the global.  Eliminate all uses of Ptr, making them
/// use FieldGlobals instead.  All uses of loaded values satisfy
/// AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}

/// PerformHeapAllocSRoA - CI is an allocation of an array of structures.  Break
/// it up into multiple allocations of arrays of the fields.
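// Shape of the transformation (an illustrative sketch, names invented): for
//   %pair = type { i32, i32 }
//   @G = internal global %pair* null       ; only store is "malloc %pair, N"
// the single allocation is split into one malloc and one global per field,
// roughly
//   @G.f0 = internal global i32* null      ; filled from "malloc i32, N"
//   @G.f1 = internal global i32* null      ; filled from "malloc i32, N"
// and GEP/load/icmp users are rewritten against the per-field globals.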
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, const DataLayout *DL,
                                            const TargetLibraryInfo *TLI) {
  DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI << '\n');
  Type *MAT = getMallocAllocatedType(CI, TLI);
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value*> FieldGlobals;
  std::vector<Value*> FieldMallocs;

  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e; ++FieldNo) {
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::getUnqual(FieldTy);

    GlobalVariable *NGV =
      new GlobalVariable(*GV->getParent(),
                         PFieldTy, false, GlobalValue::InternalLinkage,
                         Constant::getNullValue(PFieldTy),
                         GV->getName() + ".f" + Twine(FieldNo), GV,
                         GV->getThreadLocalMode());
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = DL->getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = DL->getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = DL->getIntPtrType(CI->getType());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, 0,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at the
  // end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  /// InsertedScalarizedLoads - As we process loads, if we can't immediately
  /// update all uses of the load, keep track of what scalarized loads are
  /// inserted for a given load.
  DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;

  // Okay, the malloc site is completely handled.  All of the uses of GV are now
  // loads, and all uses of those loads are simple.  Rewrite them to use loads
  // of the per-field globals instead.
  for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
      Constant *Null = Constant::getNullValue(PT->getElementType());
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 && "Already processed this phi");

    // Add all the incoming values.  This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  ++NumHeapSRA;
  return cast<GlobalVariable>(FieldGlobals[0]);
}

/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
/// pointer global variable with a single value stored into it that is a malloc
/// or cast of malloc.
static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
                                               CallInst *CI,
                                               Type *AllocTy,
                                               AtomicOrdering Ordering,
                                               Module::global_iterator &GVI,
                                               const DataLayout *DL,
                                               TargetLibraryInfo *TLI) {
  if (!DL)
    return false;

  // If this is a malloc of an abstract type, don't touch it.
  if (!AllocTy->isSized())
    return false;

  // We can't optimize this global unless all uses of it are *known* to be
  // of the malloc value, not of the null initializer value (consider a use
  // that compares the global's value against zero to see if the malloc has
  // been reached).  To do this, we check to see if all uses of the global
  // would trap if the global were null: this proves that they must all
  // happen after the malloc.
  if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
    return false;

  // We can't optimize this if the malloc itself is used in a complex way,
  // for example, being stored into multiple globals.  This allows the
  // malloc to be stored into the specified global, loaded, icmp'd, and
  // GEP'd.  These are all things we could transform to using the global
  // for.
  SmallPtrSet<const PHINode*, 8> PHIs;
  if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
    return false;

  // If we have a global that is only initialized with a fixed size malloc,
  // transform the program to use global memory instead of malloc'd memory.
  // This eliminates dynamic allocation, avoids an indirection accessing the
  // data, and exposes the resultant global to further GlobalOpt.
  // We cannot optimize the malloc if we cannot determine malloc array size.
  Value *NElems = getMallocArraySize(CI, DL, TLI, true);
  if (!NElems)
    return false;

  if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
    // Restrict this transformation to only working on small allocations
    // (2048 bytes currently), as we don't want to introduce a 16M global or
    // something.
    if (NElements->getZExtValue() * DL->getTypeAllocSize(AllocTy) < 2048) {
      GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
      return true;
    }

  // If the allocation is an array of structures, consider transforming this
  // into multiple malloc'd arrays, one for each field.  This is basically
  // SRoA for malloc'd memory.
1510 1511 if (Ordering != NotAtomic) 1512 return false; 1513 1514 // If this is an allocation of a fixed size array of structs, analyze as a 1515 // variable size array. malloc [100 x struct],1 -> malloc struct, 100 1516 if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1)) 1517 if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy)) 1518 AllocTy = AT->getElementType(); 1519 1520 StructType *AllocSTy = dyn_cast<StructType>(AllocTy); 1521 if (!AllocSTy) 1522 return false; 1523 1524 // If the structure has an unreasonable number of fields, leave it 1525 // alone. 1526 if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 && 1527 AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) { 1528 1529 // If this is a fixed size array, transform the malloc to be an alloc of 1530 // structs. malloc [100 x struct],1 -> malloc struct, 100 1531 if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) { 1532 Type *IntPtrTy = DL->getIntPtrType(CI->getType()); 1533 unsigned TypeSize = DL->getStructLayout(AllocSTy)->getSizeInBytes(); 1534 Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize); 1535 Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements()); 1536 Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, 1537 AllocSize, NumElements, 1538 0, CI->getName()); 1539 Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI); 1540 CI->replaceAllUsesWith(Cast); 1541 CI->eraseFromParent(); 1542 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc)) 1543 CI = cast<CallInst>(BCI->getOperand(0)); 1544 else 1545 CI = cast<CallInst>(Malloc); 1546 } 1547 1548 GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), 1549 DL, TLI); 1550 return true; 1551 } 1552 1553 return false; 1554} 1555 1556// OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge 1557// that only one value (besides its initializer) is ever stored to the global. 1558static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal, 1559 AtomicOrdering Ordering, 1560 Module::global_iterator &GVI, 1561 const DataLayout *DL, 1562 TargetLibraryInfo *TLI) { 1563 // Ignore no-op GEPs and bitcasts. 1564 StoredOnceVal = StoredOnceVal->stripPointerCasts(); 1565 1566 // If we are dealing with a pointer global that is initialized to null and 1567 // only has one (non-null) value stored into it, then we can optimize any 1568 // users of the loaded value (often calls and loads) that would trap if the 1569 // value was null. 1570 if (GV->getInitializer()->getType()->isPointerTy() && 1571 GV->getInitializer()->isNullValue()) { 1572 if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) { 1573 if (GV->getInitializer()->getType() != SOVC->getType()) 1574 SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType()); 1575 1576 // Optimize away any trapping uses of the loaded value. 1577 if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI)) 1578 return true; 1579 } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) { 1580 Type *MallocType = getMallocAllocatedType(CI, TLI); 1581 if (MallocType && 1582 TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI, 1583 DL, TLI)) 1584 return true; 1585 } 1586 } 1587 1588 return false; 1589} 1590 1591/// TryToShrinkGlobalToBoolean - At this point, we have learned that the only 1592/// two values ever stored into GV are its initializer and OtherVal.
See if we 1593/// can shrink the global into a boolean and select between the two values 1594/// whenever it is used. This exposes the values to other scalar optimizations. 1595static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) { 1596 Type *GVElType = GV->getType()->getElementType(); 1597 1598 // If GVElType is already i1, it is already shrunk. If the type of the GV is 1599 // an FP value, pointer or vector, don't do this optimization because a select 1600 // between them is very expensive and unlikely to lead to later 1601 // simplification. In these cases, we typically end up with "cond ? v1 : v2" 1602 // where v1 and v2 both require constant pool loads, a big loss. 1603 if (GVElType == Type::getInt1Ty(GV->getContext()) || 1604 GVElType->isFloatingPointTy() || 1605 GVElType->isPointerTy() || GVElType->isVectorTy()) 1606 return false; 1607 1608 // Walk the use list of the global to see if all the uses are loads or stores. 1609 // If there is anything else, bail out. 1610 for (User *U : GV->users()) 1611 if (!isa<LoadInst>(U) && !isa<StoreInst>(U)) 1612 return false; 1613 1614 DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV); 1615 1616 // Create the new global, initializing it to false. 1617 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()), 1618 false, 1619 GlobalValue::InternalLinkage, 1620 ConstantInt::getFalse(GV->getContext()), 1621 GV->getName()+".b", 1622 GV->getThreadLocalMode(), 1623 GV->getType()->getAddressSpace()); 1624 GV->getParent()->getGlobalList().insert(GV, NewGV); 1625 1626 Constant *InitVal = GV->getInitializer(); 1627 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) && 1628 "No reason to shrink to bool!"); 1629 1630 // If initialized to zero and storing one into the global, we can use a cast 1631 // instead of a select to synthesize the desired value. 1632 bool IsOneZero = false; 1633 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) 1634 IsOneZero = InitVal->isNullValue() && CI->isOne(); 1635 1636 while (!GV->use_empty()) { 1637 Instruction *UI = cast<Instruction>(GV->user_back()); 1638 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { 1639 // Change the store into a boolean store. 1640 bool StoringOther = SI->getOperand(0) == OtherVal; 1641 // Only do this if we weren't storing a loaded value. 1642 Value *StoreVal; 1643 if (StoringOther || SI->getOperand(0) == InitVal) { 1644 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()), 1645 StoringOther); 1646 } else { 1647 // Otherwise, we are storing a previously loaded copy. To do this, 1648 // change the copy from copying the original value to just copying the 1649 // bool. 1650 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0)); 1651 1652 // If we've already replaced the input, StoredVal will be a cast or 1653 // select instruction. If not, it will be a load of the original 1654 // global. 1655 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) { 1656 assert(LI->getOperand(0) == GV && "Not a copy!"); 1657 // Insert a new load, to preserve the saved value.
1658 StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0, 1659 LI->getOrdering(), LI->getSynchScope(), LI); 1660 } else { 1661 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) && 1662 "This is not a form that we understand!"); 1663 StoreVal = StoredVal->getOperand(0); 1664 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!"); 1665 } 1666 } 1667 new StoreInst(StoreVal, NewGV, false, 0, 1668 SI->getOrdering(), SI->getSynchScope(), SI); 1669 } else { 1670 // Change the load into a load of bool then a select. 1671 LoadInst *LI = cast<LoadInst>(UI); 1672 LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0, 1673 LI->getOrdering(), LI->getSynchScope(), LI); 1674 Value *NSI; 1675 if (IsOneZero) 1676 NSI = new ZExtInst(NLI, LI->getType(), "", LI); 1677 else 1678 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI); 1679 NSI->takeName(LI); 1680 LI->replaceAllUsesWith(NSI); 1681 } 1682 UI->eraseFromParent(); 1683 } 1684 1685 // Retain the name of the old global variable. People who are debugging their 1686 // programs may expect these variables to be named the same. 1687 NewGV->takeName(GV); 1688 GV->eraseFromParent(); 1689 return true; 1690} 1691 1692 1693/// ProcessGlobal - Analyze the specified global variable and optimize it if 1694/// possible. If we make a change, return true. 1695bool GlobalOpt::ProcessGlobal(GlobalVariable *GV, 1696 Module::global_iterator &GVI) { 1697 if (!GV->isDiscardableIfUnused()) 1698 return false; 1699 1700 // Do more involved optimizations if the global is internal. 1701 GV->removeDeadConstantUsers(); 1702 1703 if (GV->use_empty()) { 1704 DEBUG(dbgs() << "GLOBAL DEAD: " << *GV); 1705 GV->eraseFromParent(); 1706 ++NumDeleted; 1707 return true; 1708 } 1709 1710 if (!GV->hasLocalLinkage()) 1711 return false; 1712 1713 GlobalStatus GS; 1714 1715 if (GlobalStatus::analyzeGlobal(GV, GS)) 1716 return false; 1717 1718 if (!GS.IsCompared && !GV->hasUnnamedAddr()) { 1719 GV->setUnnamedAddr(true); 1720 NumUnnamed++; 1721 } 1722 1723 if (GV->isConstant() || !GV->hasInitializer()) 1724 return false; 1725 1726 return ProcessInternalGlobal(GV, GVI, GS); 1727} 1728 1729/// ProcessInternalGlobal - Analyze the specified global variable and optimize 1730/// it if possible. If we make a change, return true. 1731bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV, 1732 Module::global_iterator &GVI, 1733 const GlobalStatus &GS) { 1734 // If this is a first class global and has only one accessing function 1735 // and this function is main (which we know is not recursive), we replace 1736 // the global with a local alloca in this function. 1737 // 1738 // NOTE: It doesn't make sense to promote non-single-value types since we 1739 // are just replacing static memory with stack memory. 1740 // 1741 // If the global is in a different address space, don't bring it to the stack.
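// Illustrative sketch (hypothetical names, not from this source):
//   @counter = internal global i32 0   ; only ever accessed inside main()
// is turned into, at the top of main():
//   %counter = alloca i32
//   store i32 0, i32* %counter
// after which every use of @counter is replaced by the alloca.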
1742 if (!GS.HasMultipleAccessingFunctions && 1743 GS.AccessingFunction && !GS.HasNonInstructionUser && 1744 GV->getType()->getElementType()->isSingleValueType() && 1745 GS.AccessingFunction->getName() == "main" && 1746 GS.AccessingFunction->hasExternalLinkage() && 1747 GV->getType()->getAddressSpace() == 0) { 1748 DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV); 1749 Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction 1750 ->getEntryBlock().begin()); 1751 Type *ElemTy = GV->getType()->getElementType(); 1752 // FIXME: Pass Global's alignment when globals have alignment 1753 AllocaInst *Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI); 1754 if (!isa<UndefValue>(GV->getInitializer())) 1755 new StoreInst(GV->getInitializer(), Alloca, &FirstI); 1756 1757 GV->replaceAllUsesWith(Alloca); 1758 GV->eraseFromParent(); 1759 ++NumLocalized; 1760 return true; 1761 } 1762 1763 // If the global is never loaded (but may be stored to), it is dead. 1764 // Delete it now. 1765 if (!GS.IsLoaded) { 1766 DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV); 1767 1768 bool Changed; 1769 if (isLeakCheckerRoot(GV)) { 1770 // Delete any constant stores to the global. 1771 Changed = CleanupPointerRootUsers(GV, TLI); 1772 } else { 1773 // Delete any stores we can find to the global. We may not be able to 1774 // make it completely dead though. 1775 Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); 1776 } 1777 1778 // If the global is dead now, delete it. 1779 if (GV->use_empty()) { 1780 GV->eraseFromParent(); 1781 ++NumDeleted; 1782 Changed = true; 1783 } 1784 return Changed; 1785 1786 } else if (GS.StoredType <= GlobalStatus::InitializerStored) { 1787 DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n"); 1788 GV->setConstant(true); 1789 1790 // Clean up any obviously simplifiable users now. 1791 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); 1792 1793 // If the global is dead now, just nuke it. 1794 if (GV->use_empty()) { 1795 DEBUG(dbgs() << " *** Marking constant allowed us to simplify " 1796 << "all users and delete global!\n"); 1797 GV->eraseFromParent(); 1798 ++NumDeleted; 1799 } 1800 1801 ++NumMarked; 1802 return true; 1803 } else if (!GV->getInitializer()->getType()->isSingleValueType()) { 1804 if (DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>()) { 1805 const DataLayout &DL = DLP->getDataLayout(); 1806 if (GlobalVariable *FirstNewGV = SRAGlobal(GV, DL)) { 1807 GVI = FirstNewGV; // Don't skip the newly produced globals! 1808 return true; 1809 } 1810 } 1811 } else if (GS.StoredType == GlobalStatus::StoredOnce) { 1812 // If the initial value for the global was an undef value, and if only 1813 // one other value was stored into it, we can just change the 1814 // initializer to be the stored value, then delete all stores to the 1815 // global. This allows us to mark it constant. 1816 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) 1817 if (isa<UndefValue>(GV->getInitializer())) { 1818 // Change the initial value here. 1819 GV->setInitializer(SOVConstant); 1820 1821 // Clean up any obviously simplifiable users now. 
1822 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); 1823 1824 if (GV->use_empty()) { 1825 DEBUG(dbgs() << " *** Substituting initializer allowed us to " 1826 << "simplify all users and delete global!\n"); 1827 GV->eraseFromParent(); 1828 ++NumDeleted; 1829 } else { 1830 GVI = GV; 1831 } 1832 ++NumSubstitute; 1833 return true; 1834 } 1835 1836 // Try to optimize globals based on the knowledge that only one value 1837 // (besides its initializer) is ever stored to the global. 1838 if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI, 1839 DL, TLI)) 1840 return true; 1841 1842 // Otherwise, if the global was not a boolean, we can shrink it to be a 1843 // boolean. 1844 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) { 1845 if (GS.Ordering == NotAtomic) { 1846 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) { 1847 ++NumShrunkToBool; 1848 return true; 1849 } 1850 } 1851 } 1852 } 1853 1854 return false; 1855} 1856 1857/// ChangeCalleesToFastCall - Walk all of the direct calls of the specified 1858/// function, changing them to FastCC. 1859static void ChangeCalleesToFastCall(Function *F) { 1860 for (User *U : F->users()) { 1861 if (isa<BlockAddress>(U)) 1862 continue; 1863 CallSite CS(cast<Instruction>(U)); 1864 CS.setCallingConv(CallingConv::Fast); 1865 } 1866} 1867 1868static AttributeSet StripNest(LLVMContext &C, const AttributeSet &Attrs) { 1869 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { 1870 unsigned Index = Attrs.getSlotIndex(i); 1871 if (!Attrs.getSlotAttributes(i).hasAttribute(Index, Attribute::Nest)) 1872 continue; 1873 1874 // There can be only one. 1875 return Attrs.removeAttribute(C, Index, Attribute::Nest); 1876 } 1877 1878 return Attrs; 1879} 1880 1881static void RemoveNestAttribute(Function *F) { 1882 F->setAttributes(StripNest(F->getContext(), F->getAttributes())); 1883 for (User *U : F->users()) { 1884 if (isa<BlockAddress>(U)) 1885 continue; 1886 CallSite CS(cast<Instruction>(U)); 1887 CS.setAttributes(StripNest(F->getContext(), CS.getAttributes())); 1888 } 1889} 1890 1891/// Return true if this is a calling convention that we'd like to change. The 1892/// idea here is that we don't want to mess with the convention if the user 1893/// explicitly requested something with performance implications like coldcc, 1894/// GHC, or anyregcc. 1895static bool isProfitableToMakeFastCC(Function *F) { 1896 CallingConv::ID CC = F->getCallingConv(); 1897 // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc? 1898 return CC == CallingConv::C || CC == CallingConv::X86_ThisCall; 1899} 1900 1901bool GlobalOpt::OptimizeFunctions(Module &M) { 1902 bool Changed = false; 1903 // Optimize functions. 1904 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) { 1905 Function *F = FI++; 1906 // Functions without names cannot be referenced outside this module. 1907 if (!F->hasName() && !F->isDeclaration()) 1908 F->setLinkage(GlobalValue::InternalLinkage); 1909 F->removeDeadConstantUsers(); 1910 if (F->isDefTriviallyDead()) { 1911 F->eraseFromParent(); 1912 Changed = true; 1913 ++NumFnDeleted; 1914 } else if (F->hasLocalLinkage()) { 1915 if (isProfitableToMakeFastCC(F) && !F->isVarArg() && 1916 !F->hasAddressTaken()) { 1917 // If this function has a calling convention worth changing, is not a 1918 // varargs function, and is only called directly, promote it to use the 1919 // Fast calling convention. 
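// For example (illustrative only): 'define internal void @helper()' becomes
// 'define internal fastcc void @helper()', and every direct call site is
// rewritten below to 'call fastcc void @helper()'.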
1920 F->setCallingConv(CallingConv::Fast); 1921 ChangeCalleesToFastCall(F); 1922 ++NumFastCallFns; 1923 Changed = true; 1924 } 1925 1926 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) && 1927 !F->hasAddressTaken()) { 1928 // The function is not used by a trampoline intrinsic, so it is safe 1929 // to remove the 'nest' attribute. 1930 RemoveNestAttribute(F); 1931 ++NumNestRemoved; 1932 Changed = true; 1933 } 1934 } 1935 } 1936 return Changed; 1937} 1938 1939bool GlobalOpt::OptimizeGlobalVars(Module &M) { 1940 bool Changed = false; 1941 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end(); 1942 GVI != E; ) { 1943 GlobalVariable *GV = GVI++; 1944 // Global variables without names cannot be referenced outside this module. 1945 if (!GV->hasName() && !GV->isDeclaration()) 1946 GV->setLinkage(GlobalValue::InternalLinkage); 1947 // Simplify the initializer. 1948 if (GV->hasInitializer()) 1949 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) { 1950 Constant *New = ConstantFoldConstantExpression(CE, DL, TLI); 1951 if (New && New != CE) 1952 GV->setInitializer(New); 1953 } 1954 1955 Changed |= ProcessGlobal(GV, GVI); 1956 } 1957 return Changed; 1958} 1959 1960/// FindGlobalCtors - Find the llvm.global_ctors list, verifying that all 1961/// initializers have an init priority of 65535. 1962GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) { 1963 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); 1964 if (GV == 0) return 0; 1965 1966 // Verify that the initializer is simple enough for us to handle. We are 1967 // only allowed to optimize the initializer if it is unique. 1968 if (!GV->hasUniqueInitializer()) return 0; 1969 1970 if (isa<ConstantAggregateZero>(GV->getInitializer())) 1971 return GV; 1972 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer()); 1973 1974 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) { 1975 if (isa<ConstantAggregateZero>(*i)) 1976 continue; 1977 ConstantStruct *CS = cast<ConstantStruct>(*i); 1978 if (isa<ConstantPointerNull>(CS->getOperand(1))) 1979 continue; 1980 1981 // Must have a function or null ptr. 1982 if (!isa<Function>(CS->getOperand(1))) 1983 return 0; 1984 1985 // Init priority must be standard. 1986 ConstantInt *CI = cast<ConstantInt>(CS->getOperand(0)); 1987 if (CI->getZExtValue() != 65535) 1988 return 0; 1989 } 1990 1991 return GV; 1992} 1993 1994/// ParseGlobalCtors - Given a llvm.global_ctors list that we can understand, 1995/// return a list of the functions and null terminator as a vector. 1996static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) { 1997 if (GV->getInitializer()->isNullValue()) 1998 return std::vector<Function*>(); 1999 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer()); 2000 std::vector<Function*> Result; 2001 Result.reserve(CA->getNumOperands()); 2002 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) { 2003 ConstantStruct *CS = cast<ConstantStruct>(*i); 2004 Result.push_back(dyn_cast<Function>(CS->getOperand(1))); 2005 } 2006 return Result; 2007} 2008 2009/// InstallGlobalCtors - Given a specified llvm.global_ctors list, install the 2010/// specified array, returning the new global to use. 2011static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL, 2012 const std::vector<Function*> &Ctors) { 2013 // If we made a change, reassemble the initializer list. 
2014 Constant *CSVals[2]; 2015 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 65535); 2016 CSVals[1] = 0; 2017 2018 StructType *StructTy = 2019 cast<StructType>(GCL->getType()->getElementType()->getArrayElementType()); 2020 2021 // Create the new init list. 2022 std::vector<Constant*> CAList; 2023 for (unsigned i = 0, e = Ctors.size(); i != e; ++i) { 2024 if (Ctors[i]) { 2025 CSVals[1] = Ctors[i]; 2026 } else { 2027 Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()), 2028 false); 2029 PointerType *PFTy = PointerType::getUnqual(FTy); 2030 CSVals[1] = Constant::getNullValue(PFTy); 2031 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 2032 0x7fffffff); 2033 } 2034 CAList.push_back(ConstantStruct::get(StructTy, CSVals)); 2035 } 2036 2037 // Create the array initializer. 2038 Constant *CA = ConstantArray::get(ArrayType::get(StructTy, 2039 CAList.size()), CAList); 2040 2041 // If we didn't change the number of elements, don't create a new GV. 2042 if (CA->getType() == GCL->getInitializer()->getType()) { 2043 GCL->setInitializer(CA); 2044 return GCL; 2045 } 2046 2047 // Create the new global and insert it next to the existing list. 2048 GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(), 2049 GCL->getLinkage(), CA, "", 2050 GCL->getThreadLocalMode()); 2051 GCL->getParent()->getGlobalList().insert(GCL, NGV); 2052 NGV->takeName(GCL); 2053 2054 // Nuke the old list, replacing any uses with the new one. 2055 if (!GCL->use_empty()) { 2056 Constant *V = NGV; 2057 if (V->getType() != GCL->getType()) 2058 V = ConstantExpr::getBitCast(V, GCL->getType()); 2059 GCL->replaceAllUsesWith(V); 2060 } 2061 GCL->eraseFromParent(); 2062 2063 if (Ctors.size()) 2064 return NGV; 2065 else 2066 return 0; 2067} 2068 2069 2070static inline bool 2071isSimpleEnoughValueToCommit(Constant *C, 2072 SmallPtrSet<Constant*, 8> &SimpleConstants, 2073 const DataLayout *DL); 2074 2075 2076/// isSimpleEnoughValueToCommit - Return true if the specified constant can be 2077/// handled by the code generator. We don't want to generate something like: 2078/// void *X = &X/42; 2079/// because the code generator doesn't have a relocation that can handle that. 2080/// 2081/// This function should be called if C was not found (but just got inserted) 2082/// in SimpleConstants to avoid having to rescan the same constants all the 2083/// time. 2084static bool isSimpleEnoughValueToCommitHelper(Constant *C, 2085 SmallPtrSet<Constant*, 8> &SimpleConstants, 2086 const DataLayout *DL) { 2087 // Simple integer, undef, constant aggregate zero, global addresses, etc are 2088 // all supported. 2089 if (C->getNumOperands() == 0 || isa<BlockAddress>(C) || 2090 isa<GlobalValue>(C)) 2091 return true; 2092 2093 // Aggregate values are safe if all their elements are. 2094 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) || 2095 isa<ConstantVector>(C)) { 2096 for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) { 2097 Constant *Op = cast<Constant>(C->getOperand(i)); 2098 if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, DL)) 2099 return false; 2100 } 2101 return true; 2102 } 2103 2104 // We don't know exactly what relocations are allowed in constant expressions, 2105 // so we allow &global+constantoffset, which is safe and uniformly supported 2106 // across targets. 2107 ConstantExpr *CE = cast<ConstantExpr>(C); 2108 switch (CE->getOpcode()) { 2109 case Instruction::BitCast: 2110 // Bitcast is fine if the casted value is fine. 
2111 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); 2112 2113 case Instruction::IntToPtr: 2114 case Instruction::PtrToInt: 2115 // int <=> ptr is fine if the int type is the same size as the 2116 // pointer type. 2117 if (!DL || DL->getTypeSizeInBits(CE->getType()) != 2118 DL->getTypeSizeInBits(CE->getOperand(0)->getType())) 2119 return false; 2120 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); 2121 2122 // GEP is fine if it is simple + constant offset. 2123 case Instruction::GetElementPtr: 2124 for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i) 2125 if (!isa<ConstantInt>(CE->getOperand(i))) 2126 return false; 2127 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); 2128 2129 case Instruction::Add: 2130 // We allow simple+cst. 2131 if (!isa<ConstantInt>(CE->getOperand(1))) 2132 return false; 2133 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); 2134 } 2135 return false; 2136} 2137 2138static inline bool 2139isSimpleEnoughValueToCommit(Constant *C, 2140 SmallPtrSet<Constant*, 8> &SimpleConstants, 2141 const DataLayout *DL) { 2142 // If we already checked this constant, we win. 2143 if (!SimpleConstants.insert(C)) return true; 2144 // Check the constant. 2145 return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL); 2146} 2147 2148 2149/// isSimpleEnoughPointerToCommit - Return true if this constant is simple 2150/// enough for us to understand. In particular, if it is a cast to anything 2151/// other than from one pointer type to another pointer type, we punt. 2152/// We basically just support direct accesses to globals and GEP's of 2153/// globals. This should be kept up to date with CommitValueTo. 2154static bool isSimpleEnoughPointerToCommit(Constant *C) { 2155 // Conservatively, avoid aggregate types. This is because we don't 2156 // want to worry about them partially overlapping other stores. 2157 if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType()) 2158 return false; 2159 2160 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) 2161 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 2162 // external globals. 2163 return GV->hasUniqueInitializer(); 2164 2165 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 2166 // Handle a constantexpr gep. 2167 if (CE->getOpcode() == Instruction::GetElementPtr && 2168 isa<GlobalVariable>(CE->getOperand(0)) && 2169 cast<GEPOperator>(CE)->isInBounds()) { 2170 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2171 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 2172 // external globals. 2173 if (!GV->hasUniqueInitializer()) 2174 return false; 2175 2176 // The first index must be zero. 2177 ConstantInt *CI = dyn_cast<ConstantInt>(*std::next(CE->op_begin())); 2178 if (!CI || !CI->isZero()) return false; 2179 2180 // The remaining indices must be compile-time known integers within the 2181 // notional bounds of the corresponding static array types. 2182 if (!CE->isGEPWithNoNotionalOverIndexing()) 2183 return false; 2184 2185 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); 2186 2187 // A constantexpr bitcast from a pointer to another pointer is a no-op, 2188 // and we know how to evaluate it by moving the bitcast from the pointer 2189 // operand to the value operand. 
2190 } else if (CE->getOpcode() == Instruction::BitCast && 2191 isa<GlobalVariable>(CE->getOperand(0))) { 2192 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 2193 // external globals. 2194 return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer(); 2195 } 2196 } 2197 2198 return false; 2199} 2200 2201/// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global 2202/// initializer. This returns 'Init' modified to reflect 'Val' stored into it. 2203/// At this point, the GEP operands of Addr [0, OpNo) have been stepped into. 2204static Constant *EvaluateStoreInto(Constant *Init, Constant *Val, 2205 ConstantExpr *Addr, unsigned OpNo) { 2206 // Base case of the recursion. 2207 if (OpNo == Addr->getNumOperands()) { 2208 assert(Val->getType() == Init->getType() && "Type mismatch!"); 2209 return Val; 2210 } 2211 2212 SmallVector<Constant*, 32> Elts; 2213 if (StructType *STy = dyn_cast<StructType>(Init->getType())) { 2214 // Break up the constant into its elements. 2215 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 2216 Elts.push_back(Init->getAggregateElement(i)); 2217 2218 // Replace the element that we are supposed to. 2219 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo)); 2220 unsigned Idx = CU->getZExtValue(); 2221 assert(Idx < STy->getNumElements() && "Struct index out of range!"); 2222 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1); 2223 2224 // Return the modified struct. 2225 return ConstantStruct::get(STy, Elts); 2226 } 2227 2228 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo)); 2229 SequentialType *InitTy = cast<SequentialType>(Init->getType()); 2230 2231 uint64_t NumElts; 2232 if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy)) 2233 NumElts = ATy->getNumElements(); 2234 else 2235 NumElts = InitTy->getVectorNumElements(); 2236 2237 // Break up the array into elements. 2238 for (uint64_t i = 0, e = NumElts; i != e; ++i) 2239 Elts.push_back(Init->getAggregateElement(i)); 2240 2241 assert(CI->getZExtValue() < NumElts); 2242 Elts[CI->getZExtValue()] = 2243 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1); 2244 2245 if (Init->getType()->isArrayTy()) 2246 return ConstantArray::get(cast<ArrayType>(InitTy), Elts); 2247 return ConstantVector::get(Elts); 2248} 2249 2250/// CommitValueTo - We have decided that Addr (which satisfies the predicate 2251/// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen. 2252static void CommitValueTo(Constant *Val, Constant *Addr) { 2253 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) { 2254 assert(GV->hasInitializer()); 2255 GV->setInitializer(Val); 2256 return; 2257 } 2258 2259 ConstantExpr *CE = cast<ConstantExpr>(Addr); 2260 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2261 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2)); 2262} 2263 2264namespace { 2265 2266/// Evaluator - This class evaluates LLVM IR, producing the Constant 2267/// representing each SSA instruction. Changes to global variables are stored 2268/// in a mapping that can be iterated over after the evaluation is complete. 2269/// Once an evaluation call fails, the evaluation object should not be reused. 
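// Typical usage (a sketch; mirrors EvaluateStaticConstructor further below):
//   Evaluator Eval(DL, TLI);
//   Constant *RetVal;
//   if (Eval.EvaluateFunction(F, RetVal, SmallVector<Constant*, 0>())) {
//     // Commit Eval.getMutatedMemory() and mark Eval.getInvariants()
//     // constant; otherwise discard the Evaluator without reusing it.
//   }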
2270class Evaluator { 2271public: 2272 Evaluator(const DataLayout *DL, const TargetLibraryInfo *TLI) 2273 : DL(DL), TLI(TLI) { 2274 ValueStack.push_back(new DenseMap<Value*, Constant*>); 2275 } 2276 2277 ~Evaluator() { 2278 DeleteContainerPointers(ValueStack); 2279 while (!AllocaTmps.empty()) { 2280 GlobalVariable *Tmp = AllocaTmps.back(); 2281 AllocaTmps.pop_back(); 2282 2283 // If there are still users of the alloca, the program is doing something 2284 // silly, e.g. storing the address of the alloca somewhere and using it 2285 // later. Since this is undefined, we'll just make it be null. 2286 if (!Tmp->use_empty()) 2287 Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType())); 2288 delete Tmp; 2289 } 2290 } 2291 2292 /// EvaluateFunction - Evaluate a call to function F, returning true if 2293 /// successful, false if we can't evaluate it. ActualArgs contains the formal 2294 /// arguments for the function. 2295 bool EvaluateFunction(Function *F, Constant *&RetVal, 2296 const SmallVectorImpl<Constant*> &ActualArgs); 2297 2298 /// EvaluateBlock - Evaluate all instructions in block BB, returning true if 2299 /// successful, false if we can't evaluate it. NewBB returns the next BB that 2300 /// control flows into, or null upon return. 2301 bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB); 2302 2303 Constant *getVal(Value *V) { 2304 if (Constant *CV = dyn_cast<Constant>(V)) return CV; 2305 Constant *R = ValueStack.back()->lookup(V); 2306 assert(R && "Reference to an uncomputed value!"); 2307 return R; 2308 } 2309 2310 void setVal(Value *V, Constant *C) { 2311 ValueStack.back()->operator[](V) = C; 2312 } 2313 2314 const DenseMap<Constant*, Constant*> &getMutatedMemory() const { 2315 return MutatedMemory; 2316 } 2317 2318 const SmallPtrSet<GlobalVariable*, 8> &getInvariants() const { 2319 return Invariants; 2320 } 2321 2322private: 2323 Constant *ComputeLoadResult(Constant *P); 2324 2325 /// ValueStack - As we compute SSA register values, we store their contents 2326 /// here. The back of the vector contains the current function and the stack 2327 /// contains the values in the calling frames. 2328 SmallVector<DenseMap<Value*, Constant*>*, 4> ValueStack; 2329 2330 /// CallStack - This is used to detect recursion. In pathological situations 2331 /// we could hit exponential behavior, but at least there is nothing 2332 /// unbounded. 2333 SmallVector<Function*, 4> CallStack; 2334 2335 /// MutatedMemory - For each store we execute, we update this map. Loads 2336 /// check this to get the most up-to-date value. If evaluation is successful, 2337 /// this state is committed to the process. 2338 DenseMap<Constant*, Constant*> MutatedMemory; 2339 2340 /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable 2341 /// to represent its body. This vector is needed so we can delete the 2342 /// temporary globals when we are done. 2343 SmallVector<GlobalVariable*, 32> AllocaTmps; 2344 2345 /// Invariants - These global variables have been marked invariant by the 2346 /// static constructor. 2347 SmallPtrSet<GlobalVariable*, 8> Invariants; 2348 2349 /// SimpleConstants - These are constants we have checked and know to be 2350 /// simple enough to live in a static initializer of a global. 
2351 SmallPtrSet<Constant*, 8> SimpleConstants; 2352 2353 const DataLayout *DL; 2354 const TargetLibraryInfo *TLI; 2355}; 2356 2357} // anonymous namespace 2358 2359/// ComputeLoadResult - Return the value that would be computed by a load from 2360/// P after the stores reflected by 'memory' have been performed. If we can't 2361/// decide, return null. 2362Constant *Evaluator::ComputeLoadResult(Constant *P) { 2363 // If this memory location has been recently stored, use the stored value: it 2364 // is the most up-to-date. 2365 DenseMap<Constant*, Constant*>::const_iterator I = MutatedMemory.find(P); 2366 if (I != MutatedMemory.end()) return I->second; 2367 2368 // Access it. 2369 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) { 2370 if (GV->hasDefinitiveInitializer()) 2371 return GV->getInitializer(); 2372 return 0; 2373 } 2374 2375 // Handle a constantexpr getelementptr. 2376 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P)) 2377 if (CE->getOpcode() == Instruction::GetElementPtr && 2378 isa<GlobalVariable>(CE->getOperand(0))) { 2379 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2380 if (GV->hasDefinitiveInitializer()) 2381 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); 2382 } 2383 2384 return 0; // don't know how to evaluate. 2385} 2386 2387/// EvaluateBlock - Evaluate all instructions in block BB, returning true if 2388/// successful, false if we can't evaluate it. NewBB returns the next BB that 2389/// control flows into, or null upon return. 2390bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, 2391 BasicBlock *&NextBB) { 2392 // This is the main evaluation loop. 2393 while (1) { 2394 Constant *InstResult = 0; 2395 2396 DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n"); 2397 2398 if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) { 2399 if (!SI->isSimple()) { 2400 DEBUG(dbgs() << "Store is not simple! Can not evaluate.\n"); 2401 return false; // no volatile/atomic accesses. 2402 } 2403 Constant *Ptr = getVal(SI->getOperand(1)); 2404 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { 2405 DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr); 2406 Ptr = ConstantFoldConstantExpression(CE, DL, TLI); 2407 DEBUG(dbgs() << "; To: " << *Ptr << "\n"); 2408 } 2409 if (!isSimpleEnoughPointerToCommit(Ptr)) { 2410 // If this is too complex for us to commit, reject it. 2411 DEBUG(dbgs() << "Pointer is too complex for us to evaluate store."); 2412 return false; 2413 } 2414 2415 Constant *Val = getVal(SI->getOperand(0)); 2416 2417 // If this might be too difficult for the backend to handle (e.g. the addr 2418 // of one global variable divided by another) then we can't commit it. 2419 if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) { 2420 DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val 2421 << "\n"); 2422 return false; 2423 } 2424 2425 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { 2426 if (CE->getOpcode() == Instruction::BitCast) { 2427 DEBUG(dbgs() << "Attempting to resolve bitcast on constant ptr.\n"); 2428 // If we're evaluating a store through a bitcast, then we need 2429 // to pull the bitcast off the pointer type and push it onto the 2430 // stored value. 2431 Ptr = CE->getOperand(0); 2432 2433 Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType(); 2434 2435 // In order to push the bitcast onto the stored value, a bitcast 2436 // from NewTy to Val's type must be legal. If it's not, we can try 2437 // introspecting NewTy to find a legal conversion. 
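// For example (an illustrative case, not from this source): a store of a
// double through a pointer obtained by bitcasting a %struct.S*, where
//   %struct.S = type { double, i32 }
// is handled by rewriting the destination to
//   getelementptr (%struct.S* @g, i32 0, i32 0)
// so the double is stored directly to the first member.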
2438 while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) { 2439 // If NewTy is a struct, we can convert the pointer to the struct 2440 // into a pointer to its first member. 2441 // FIXME: This could be extended to support arrays as well. 2442 if (StructType *STy = dyn_cast<StructType>(NewTy)) { 2443 NewTy = STy->getTypeAtIndex(0U); 2444 2445 IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32); 2446 Constant *IdxZero = ConstantInt::get(IdxTy, 0, false); 2447 Constant * const IdxList[] = {IdxZero, IdxZero}; 2448 2449 Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList); 2450 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) 2451 Ptr = ConstantFoldConstantExpression(CE, DL, TLI); 2452 2453 // If we can't improve the situation by introspecting NewTy, 2454 // we have to give up. 2455 } else { 2456 DEBUG(dbgs() << "Failed to bitcast constant ptr, can not " 2457 "evaluate.\n"); 2458 return false; 2459 } 2460 } 2461 2462 // If we found compatible types, go ahead and push the bitcast 2463 // onto the stored value. 2464 Val = ConstantExpr::getBitCast(Val, NewTy); 2465 2466 DEBUG(dbgs() << "Evaluated bitcast: " << *Val << "\n"); 2467 } 2468 } 2469 2470 MutatedMemory[Ptr] = Val; 2471 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) { 2472 InstResult = ConstantExpr::get(BO->getOpcode(), 2473 getVal(BO->getOperand(0)), 2474 getVal(BO->getOperand(1))); 2475 DEBUG(dbgs() << "Found a BinaryOperator! Simplifying: " << *InstResult 2476 << "\n"); 2477 } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) { 2478 InstResult = ConstantExpr::getCompare(CI->getPredicate(), 2479 getVal(CI->getOperand(0)), 2480 getVal(CI->getOperand(1))); 2481 DEBUG(dbgs() << "Found a CmpInst! Simplifying: " << *InstResult 2482 << "\n"); 2483 } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) { 2484 InstResult = ConstantExpr::getCast(CI->getOpcode(), 2485 getVal(CI->getOperand(0)), 2486 CI->getType()); 2487 DEBUG(dbgs() << "Found a Cast! Simplifying: " << *InstResult 2488 << "\n"); 2489 } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) { 2490 InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)), 2491 getVal(SI->getOperand(1)), 2492 getVal(SI->getOperand(2))); 2493 DEBUG(dbgs() << "Found a Select! Simplifying: " << *InstResult 2494 << "\n"); 2495 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) { 2496 Constant *P = getVal(GEP->getOperand(0)); 2497 SmallVector<Constant*, 8> GEPOps; 2498 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); 2499 i != e; ++i) 2500 GEPOps.push_back(getVal(*i)); 2501 InstResult = 2502 ConstantExpr::getGetElementPtr(P, GEPOps, 2503 cast<GEPOperator>(GEP)->isInBounds()); 2504 DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult 2505 << "\n"); 2506 } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) { 2507 2508 if (!LI->isSimple()) { 2509 DEBUG(dbgs() << "Found a Load! Not a simple load, can not evaluate.\n"); 2510 return false; // no volatile/atomic accesses. 2511 } 2512 2513 Constant *Ptr = getVal(LI->getOperand(0)); 2514 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { 2515 Ptr = ConstantFoldConstantExpression(CE, DL, TLI); 2516 DEBUG(dbgs() << "Found a constant pointer expression, constant " 2517 "folding: " << *Ptr << "\n"); 2518 } 2519 InstResult = ComputeLoadResult(Ptr); 2520 if (InstResult == 0) { 2521 DEBUG(dbgs() << "Failed to compute load result. Can not evaluate load." 2522 "\n"); 2523 return false; // Could not evaluate load. 
2524 } 2525 2526 DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n"); 2527 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) { 2528 if (AI->isArrayAllocation()) { 2529 DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n"); 2530 return false; // Cannot handle array allocs. 2531 } 2532 Type *Ty = AI->getType()->getElementType(); 2533 AllocaTmps.push_back(new GlobalVariable(Ty, false, 2534 GlobalValue::InternalLinkage, 2535 UndefValue::get(Ty), 2536 AI->getName())); 2537 InstResult = AllocaTmps.back(); 2538 DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n"); 2539 } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) { 2540 CallSite CS(CurInst); 2541 2542 // Debug info can safely be ignored here. 2543 if (isa<DbgInfoIntrinsic>(CS.getInstruction())) { 2544 DEBUG(dbgs() << "Ignoring debug info.\n"); 2545 ++CurInst; 2546 continue; 2547 } 2548 2549 // Cannot handle inline asm. 2550 if (isa<InlineAsm>(CS.getCalledValue())) { 2551 DEBUG(dbgs() << "Found inline asm, can not evaluate.\n"); 2552 return false; 2553 } 2554 2555 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) { 2556 if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) { 2557 if (MSI->isVolatile()) { 2558 DEBUG(dbgs() << "Can not optimize a volatile memset " << 2559 "intrinsic.\n"); 2560 return false; 2561 } 2562 Constant *Ptr = getVal(MSI->getDest()); 2563 Constant *Val = getVal(MSI->getValue()); 2564 Constant *DestVal = ComputeLoadResult(getVal(Ptr)); 2565 if (Val->isNullValue() && DestVal && DestVal->isNullValue()) { 2566 // This memset is a no-op. 2567 DEBUG(dbgs() << "Ignoring no-op memset.\n"); 2568 ++CurInst; 2569 continue; 2570 } 2571 } 2572 2573 if (II->getIntrinsicID() == Intrinsic::lifetime_start || 2574 II->getIntrinsicID() == Intrinsic::lifetime_end) { 2575 DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n"); 2576 ++CurInst; 2577 continue; 2578 } 2579 2580 if (II->getIntrinsicID() == Intrinsic::invariant_start) { 2581 // We don't insert an entry into Values, as it doesn't have a 2582 // meaningful return value. 2583 if (!II->use_empty()) { 2584 DEBUG(dbgs() << "Found unused invariant_start. Can't evaluate.\n"); 2585 return false; 2586 } 2587 ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0)); 2588 Value *PtrArg = getVal(II->getArgOperand(1)); 2589 Value *Ptr = PtrArg->stripPointerCasts(); 2590 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) { 2591 Type *ElemTy = cast<PointerType>(GV->getType())->getElementType(); 2592 if (DL && !Size->isAllOnesValue() && 2593 Size->getValue().getLimitedValue() >= 2594 DL->getTypeStoreSize(ElemTy)) { 2595 Invariants.insert(GV); 2596 DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV 2597 << "\n"); 2598 } else { 2599 DEBUG(dbgs() << "Found a global var, but can not treat it as an " 2600 "invariant.\n"); 2601 } 2602 } 2603 // Continue even if we do nothing. 2604 ++CurInst; 2605 continue; 2606 } 2607 2608 DEBUG(dbgs() << "Unknown intrinsic. Can not evaluate.\n"); 2609 return false; 2610 } 2611 2612 // Resolve function pointers. 2613 Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue())); 2614 if (!Callee || Callee->mayBeOverridden()) { 2615 DEBUG(dbgs() << "Can not resolve function pointer.\n"); 2616 return false; // Cannot resolve. 
2617 } 2618 2619 SmallVector<Constant*, 8> Formals; 2620 for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i) 2621 Formals.push_back(getVal(*i)); 2622 2623 if (Callee->isDeclaration()) { 2624 // If this is a function we can constant fold, do it. 2625 if (Constant *C = ConstantFoldCall(Callee, Formals, TLI)) { 2626 InstResult = C; 2627 DEBUG(dbgs() << "Constant folded function call. Result: " << 2628 *InstResult << "\n"); 2629 } else { 2630 DEBUG(dbgs() << "Can not constant fold function call.\n"); 2631 return false; 2632 } 2633 } else { 2634 if (Callee->getFunctionType()->isVarArg()) { 2635 DEBUG(dbgs() << "Can not constant fold vararg function call.\n"); 2636 return false; 2637 } 2638 2639 Constant *RetVal = 0; 2640 // Execute the call, if successful, use the return value. 2641 ValueStack.push_back(new DenseMap<Value*, Constant*>); 2642 if (!EvaluateFunction(Callee, RetVal, Formals)) { 2643 DEBUG(dbgs() << "Failed to evaluate function.\n"); 2644 return false; 2645 } 2646 delete ValueStack.pop_back_val(); 2647 InstResult = RetVal; 2648 2649 if (InstResult != NULL) { 2650 DEBUG(dbgs() << "Successfully evaluated function. Result: " << 2651 InstResult << "\n\n"); 2652 } else { 2653 DEBUG(dbgs() << "Successfully evaluated function. Result: 0\n\n"); 2654 } 2655 } 2656 } else if (isa<TerminatorInst>(CurInst)) { 2657 DEBUG(dbgs() << "Found a terminator instruction.\n"); 2658 2659 if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) { 2660 if (BI->isUnconditional()) { 2661 NextBB = BI->getSuccessor(0); 2662 } else { 2663 ConstantInt *Cond = 2664 dyn_cast<ConstantInt>(getVal(BI->getCondition())); 2665 if (!Cond) return false; // Cannot determine. 2666 2667 NextBB = BI->getSuccessor(!Cond->getZExtValue()); 2668 } 2669 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) { 2670 ConstantInt *Val = 2671 dyn_cast<ConstantInt>(getVal(SI->getCondition())); 2672 if (!Val) return false; // Cannot determine. 2673 NextBB = SI->findCaseValue(Val).getCaseSuccessor(); 2674 } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) { 2675 Value *Val = getVal(IBI->getAddress())->stripPointerCasts(); 2676 if (BlockAddress *BA = dyn_cast<BlockAddress>(Val)) 2677 NextBB = BA->getBasicBlock(); 2678 else 2679 return false; // Cannot determine. 2680 } else if (isa<ReturnInst>(CurInst)) { 2681 NextBB = 0; 2682 } else { 2683 // invoke, unwind, resume, unreachable. 2684 DEBUG(dbgs() << "Can not handle terminator."); 2685 return false; // Cannot handle this terminator. 2686 } 2687 2688 // We succeeded at evaluating this block! 2689 DEBUG(dbgs() << "Successfully evaluated block.\n"); 2690 return true; 2691 } else { 2692 // Did not know how to evaluate this! 2693 DEBUG(dbgs() << "Failed to evaluate block due to unhandled instruction." 2694 "\n"); 2695 return false; 2696 } 2697 2698 if (!CurInst->use_empty()) { 2699 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult)) 2700 InstResult = ConstantFoldConstantExpression(CE, DL, TLI); 2701 2702 setVal(CurInst, InstResult); 2703 } 2704 2705 // If we just processed an invoke, we finished evaluating the block. 2706 if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) { 2707 NextBB = II->getNormalDest(); 2708 DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n"); 2709 return true; 2710 } 2711 2712 // Advance program counter. 2713 ++CurInst; 2714 } 2715} 2716 2717/// EvaluateFunction - Evaluate a call to function F, returning true if 2718/// successful, false if we can't evaluate it. 
ActualArgs contains the actual 2719/// argument values for the call. 2720bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal, 2721 const SmallVectorImpl<Constant*> &ActualArgs) { 2722 // Check to see if this function is already executing (recursion). If so, 2723 // bail out. TODO: we might want to accept limited recursion. 2724 if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end()) 2725 return false; 2726 2727 CallStack.push_back(F); 2728 2729 // Initialize arguments to the incoming values specified. 2730 unsigned ArgNo = 0; 2731 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E; 2732 ++AI, ++ArgNo) 2733 setVal(AI, ActualArgs[ArgNo]); 2734 2735 // ExecutedBlocks - We only handle non-looping, non-recursive code. As such, 2736 // we can only evaluate any one basic block at most once. This set keeps 2737 // track of what we have executed so we can detect recursive cases etc. 2738 SmallPtrSet<BasicBlock*, 32> ExecutedBlocks; 2739 2740 // CurBB - The current basic block we're evaluating. 2741 BasicBlock *CurBB = F->begin(); 2742 2743 BasicBlock::iterator CurInst = CurBB->begin(); 2744 2745 while (1) { 2746 BasicBlock *NextBB = 0; // Initialized to avoid compiler warnings. 2747 DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n"); 2748 2749 if (!EvaluateBlock(CurInst, NextBB)) 2750 return false; 2751 2752 if (NextBB == 0) { 2753 // Successfully running until there's no next block means that we found 2754 // the return. Fill in the return value and pop the call stack. 2755 ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator()); 2756 if (RI->getNumOperands()) 2757 RetVal = getVal(RI->getOperand(0)); 2758 CallStack.pop_back(); 2759 return true; 2760 } 2761 2762 // Okay, we succeeded in evaluating this control flow. See if we have 2763 // executed the new block before. If so, we have a looping function, 2764 // which we cannot evaluate in reasonable time. 2765 if (!ExecutedBlocks.insert(NextBB)) 2766 return false; // looped! 2767 2768 // Okay, we have never been in this block before. Check to see if there 2769 // are any PHI nodes. If so, evaluate them with information about where 2770 // we came from. 2771 PHINode *PN = 0; 2772 for (CurInst = NextBB->begin(); 2773 (PN = dyn_cast<PHINode>(CurInst)); ++CurInst) 2774 setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB))); 2775 2776 // Advance to the next block. 2777 CurBB = NextBB; 2778 } 2779} 2780 2781/// EvaluateStaticConstructor - Evaluate static constructors in the function, if 2782/// we can. Return true if we can, false otherwise. 2783static bool EvaluateStaticConstructor(Function *F, const DataLayout *DL, 2784 const TargetLibraryInfo *TLI) { 2785 // Call the function. 2786 Evaluator Eval(DL, TLI); 2787 Constant *RetValDummy; 2788 bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy, 2789 SmallVector<Constant*, 0>()); 2790 2791 if (EvalSuccess) { 2792 // We succeeded at evaluation: commit the result.
2793 DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" 2794 << F->getName() << "' to " << Eval.getMutatedMemory().size() 2795 << " stores.\n"); 2796 for (DenseMap<Constant*, Constant*>::const_iterator I = 2797 Eval.getMutatedMemory().begin(), E = Eval.getMutatedMemory().end(); 2798 I != E; ++I) 2799 CommitValueTo(I->second, I->first); 2800 for (SmallPtrSet<GlobalVariable*, 8>::const_iterator I = 2801 Eval.getInvariants().begin(), E = Eval.getInvariants().end(); 2802 I != E; ++I) 2803 (*I)->setConstant(true); 2804 } 2805 2806 return EvalSuccess; 2807} 2808 2809/// OptimizeGlobalCtorsList - Simplify and evaluate global ctors if possible. 2810/// Return true if anything changed. 2811bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) { 2812 std::vector<Function*> Ctors = ParseGlobalCtors(GCL); 2813 bool MadeChange = false; 2814 if (Ctors.empty()) return false; 2815 2816 // Loop over global ctors, optimizing them when we can. 2817 for (unsigned i = 0; i != Ctors.size(); ++i) { 2818 Function *F = Ctors[i]; 2819 // Found a null terminator in the middle of the list, prune off the rest of 2820 // the list. 2821 if (F == 0) { 2822 if (i != Ctors.size()-1) { 2823 Ctors.resize(i+1); 2824 MadeChange = true; 2825 } 2826 break; 2827 } 2828 DEBUG(dbgs() << "Optimizing Global Constructor: " << *F << "\n"); 2829 2830 // We cannot simplify external ctor functions. 2831 if (F->empty()) continue; 2832 2833 // If we can evaluate the ctor at compile time, do so. 2834 if (EvaluateStaticConstructor(F, DL, TLI)) { 2835 Ctors.erase(Ctors.begin()+i); 2836 MadeChange = true; 2837 --i; 2838 ++NumCtorsEvaluated; 2839 continue; 2840 } 2841 } 2842 2843 if (!MadeChange) return false; 2844 2845 GCL = InstallGlobalCtors(GCL, Ctors); 2846 return true; 2847} 2848 2849static int compareNames(Constant *const *A, Constant *const *B) { 2850 return (*A)->getName().compare((*B)->getName()); 2851} 2852 2853static void setUsedInitializer(GlobalVariable &V, 2854 SmallPtrSet<GlobalValue *, 8> Init) { 2855 if (Init.empty()) { 2856 V.eraseFromParent(); 2857 return; 2858 } 2859 2860 // Type of pointer to the array of pointers. 2861 PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0); 2862 2863 SmallVector<llvm::Constant *, 8> UsedArray; 2864 for (SmallPtrSet<GlobalValue *, 8>::iterator I = Init.begin(), E = Init.end(); 2865 I != E; ++I) { 2866 Constant *Cast 2867 = ConstantExpr::getPointerBitCastOrAddrSpaceCast(*I, Int8PtrTy); 2868 UsedArray.push_back(Cast); 2869 } 2870 // Sort to get deterministic order. 2871 array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames); 2872 ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size()); 2873 2874 Module *M = V.getParent(); 2875 V.removeFromParent(); 2876 GlobalVariable *NV = 2877 new GlobalVariable(*M, ATy, false, llvm::GlobalValue::AppendingLinkage, 2878 llvm::ConstantArray::get(ATy, UsedArray), ""); 2879 NV->takeName(&V); 2880 NV->setSection("llvm.metadata"); 2881 delete &V; 2882} 2883 2884namespace { 2885/// \brief An easy to access representation of llvm.used and llvm.compiler.used.
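/// (For reference: llvm.used and llvm.compiler.used are appending-linkage
/// arrays of i8* in the "llvm.metadata" section, and membership keeps a
/// global from being discarded. An illustrative entry:
///   @llvm.used = appending global [1 x i8*]
///                [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata")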
2886class LLVMUsed { 2887 SmallPtrSet<GlobalValue *, 8> Used; 2888 SmallPtrSet<GlobalValue *, 8> CompilerUsed; 2889 GlobalVariable *UsedV; 2890 GlobalVariable *CompilerUsedV; 2891 2892public: 2893 LLVMUsed(Module &M) { 2894 UsedV = collectUsedGlobalVariables(M, Used, false); 2895 CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true); 2896 } 2897 typedef SmallPtrSet<GlobalValue *, 8>::iterator iterator; 2898 iterator usedBegin() { return Used.begin(); } 2899 iterator usedEnd() { return Used.end(); } 2900 iterator compilerUsedBegin() { return CompilerUsed.begin(); } 2901 iterator compilerUsedEnd() { return CompilerUsed.end(); } 2902 bool usedCount(GlobalValue *GV) const { return Used.count(GV); } 2903 bool compilerUsedCount(GlobalValue *GV) const { 2904 return CompilerUsed.count(GV); 2905 } 2906 bool usedErase(GlobalValue *GV) { return Used.erase(GV); } 2907 bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); } 2908 bool usedInsert(GlobalValue *GV) { return Used.insert(GV); } 2909 bool compilerUsedInsert(GlobalValue *GV) { return CompilerUsed.insert(GV); } 2910 2911 void syncVariablesAndSets() { 2912 if (UsedV) 2913 setUsedInitializer(*UsedV, Used); 2914 if (CompilerUsedV) 2915 setUsedInitializer(*CompilerUsedV, CompilerUsed); 2916 } 2917}; 2918} 2919 2920static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) { 2921 if (GA.use_empty()) // No use at all. 2922 return false; 2923 2924 assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) && 2925 "We should have removed the duplicated " 2926 "element from llvm.compiler.used"); 2927 if (!GA.hasOneUse()) 2928 // Strictly more than one use. So at least one is not in llvm.used and 2929 // llvm.compiler.used. 2930 return true; 2931 2932 // Exactly one use. Check if it is in llvm.used or llvm.compiler.used. 2933 return !U.usedCount(&GA) && !U.compilerUsedCount(&GA); 2934} 2935 2936static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V, 2937 const LLVMUsed &U) { 2938 unsigned N = 2; 2939 assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) && 2940 "We should have removed the duplicated " 2941 "element from llvm.compiler.used"); 2942 if (U.usedCount(&V) || U.compilerUsedCount(&V)) 2943 ++N; 2944 return V.hasNUsesOrMore(N); 2945} 2946 2947static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) { 2948 if (!GA.hasLocalLinkage()) 2949 return true; 2950 2951 return U.usedCount(&GA) || U.compilerUsedCount(&GA); 2952} 2953 2954static bool hasUsesToReplace(GlobalAlias &GA, LLVMUsed &U, bool &RenameTarget) { 2955 RenameTarget = false; 2956 bool Ret = false; 2957 if (hasUseOtherThanLLVMUsed(GA, U)) 2958 Ret = true; 2959 2960 // If the alias is externally visible, we may still be able to simplify it. 2961 if (!mayHaveOtherReferences(GA, U)) 2962 return Ret; 2963 2964 // If the aliasee has internal linkage, give it the name and linkage 2965 // of the alias, and delete the alias. This turns: 2966 // define internal ... @f(...) 2967 // @a = alias ... @f 2968 // into: 2969 // define ... @a(...) 2970 Constant *Aliasee = GA.getAliasee(); 2971 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts()); 2972 if (!Target->hasLocalLinkage()) 2973 return Ret; 2974 2975 // Do not perform the transform if multiple aliases potentially target the 2976 // aliasee. This check also ensures that it is safe to replace the section 2977 // and other attributes of the aliasee with those of the alias. 
2978 if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U)) 2979 return Ret; 2980 2981 RenameTarget = true; 2982 return true; 2983} 2984 2985bool GlobalOpt::OptimizeGlobalAliases(Module &M) { 2986 bool Changed = false; 2987 LLVMUsed Used(M); 2988 2989 for (SmallPtrSet<GlobalValue *, 8>::iterator I = Used.usedBegin(), 2990 E = Used.usedEnd(); 2991 I != E; ++I) 2992 Used.compilerUsedErase(*I); 2993 2994 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); 2995 I != E;) { 2996 Module::alias_iterator J = I++; 2997 // Aliases without names cannot be referenced outside this module. 2998 if (!J->hasName() && !J->isDeclaration()) 2999 J->setLinkage(GlobalValue::InternalLinkage); 3000 // If the aliasee may change at link time, nothing can be done - bail out. 3001 if (J->mayBeOverridden()) 3002 continue; 3003 3004 Constant *Aliasee = J->getAliasee(); 3005 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts()); 3006 Target->removeDeadConstantUsers(); 3007 3008 // Make all users of the alias use the aliasee instead. 3009 bool RenameTarget; 3010 if (!hasUsesToReplace(*J, Used, RenameTarget)) 3011 continue; 3012 3013 J->replaceAllUsesWith(Aliasee); 3014 ++NumAliasesResolved; 3015 Changed = true; 3016 3017 if (RenameTarget) { 3018 // Give the aliasee the name, linkage and other attributes of the alias. 3019 Target->takeName(J); 3020 Target->setLinkage(J->getLinkage()); 3021 Target->setVisibility(J->getVisibility()); 3022 Target->setDLLStorageClass(J->getDLLStorageClass()); 3023 3024 if (Used.usedErase(J)) 3025 Used.usedInsert(Target); 3026 3027 if (Used.compilerUsedErase(J)) 3028 Used.compilerUsedInsert(Target); 3029 } else if (mayHaveOtherReferences(*J, Used)) 3030 continue; 3031 3032 // Delete the alias. 3033 M.getAliasList().erase(J); 3034 ++NumAliasesRemoved; 3035 Changed = true; 3036 } 3037 3038 Used.syncVariablesAndSets(); 3039 3040 return Changed; 3041} 3042 3043static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) { 3044 if (!TLI->has(LibFunc::cxa_atexit)) 3045 return 0; 3046 3047 Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit)); 3048 3049 if (!Fn) 3050 return 0; 3051 3052 FunctionType *FTy = Fn->getFunctionType(); 3053 3054 // Checking that the function has the right return type, the right number of 3055 // parameters and that they all have pointer types should be enough. 3056 if (!FTy->getReturnType()->isIntegerTy() || 3057 FTy->getNumParams() != 3 || 3058 !FTy->getParamType(0)->isPointerTy() || 3059 !FTy->getParamType(1)->isPointerTy() || 3060 !FTy->getParamType(2)->isPointerTy()) 3061 return 0; 3062 3063 return Fn; 3064} 3065 3066/// cxxDtorIsEmpty - Returns whether the given function is an empty C++ 3067/// destructor and can therefore be eliminated. 3068/// Note that we assume that other optimization passes have already simplified 3069/// the code so we only look for a function with a single basic block, where 3070/// the only allowed instructions are 'ret', 'call' to an empty C++ dtor and 3071/// other side-effect free instructions. 3072static bool cxxDtorIsEmpty(const Function &Fn, 3073 SmallPtrSet<const Function *, 8> &CalledFunctions) { 3074 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and 3075 // nounwind, but that doesn't seem worth doing. 
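// Illustrative example (a hypothetical destructor, not from this source):
//   define linkonce_odr void @_ZN1AD2Ev(%class.A* %this) {
//     ret void
//   }
// qualifies as empty, so a '__cxa_atexit(@_ZN1AD2Ev, ...)' registration of
// it can be dropped by OptimizeEmptyGlobalCXXDtors below.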
  if (Fn.isDeclaration())
    return false;

  // Require exactly one basic block; multi-block destructors are rejected.
  if (++Fn.begin() != Fn.end())
    return false;

  const BasicBlock &EntryBlock = Fn.getEntryBlock();
  for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end();
       I != E; ++I) {
    if (const CallInst *CI = dyn_cast<CallInst>(I)) {
      // Ignore debug intrinsics.
      if (isa<DbgInfoIntrinsic>(CI))
        continue;

      const Function *CalledFn = CI->getCalledFunction();

      // Indirect calls cannot be analyzed.
      if (!CalledFn)
        return false;

      SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions);

      // Don't treat recursive functions as empty.
      if (!NewCalledFunctions.insert(CalledFn))
        return false;

      if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
        return false;
    } else if (isa<ReturnInst>(*I))
      return true; // We're done.
    else if (I->mayHaveSideEffects())
      return false; // Destructor with side effects, bail.
  }

  return false;
}

bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
  /// Itanium C++ ABI p3.3.5:
  ///
  /// After constructing a global (or local static) object, that will require
  /// destruction on exit, a termination function is registered as follows:
  ///
  /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
  ///
  /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
  /// call f(p) when DSO d is unloaded, before all such termination calls
  /// registered before this one. It returns zero if registration is
  /// successful, nonzero on failure.

  // Look for calls to __cxa_atexit that register an effectively empty
  // destructor, and remove them.
  bool Changed = false;

  for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end();
       I != E;) {
    // We're only interested in calls. Theoretically, we could handle invoke
    // instructions as well, but neither llvm-gcc nor clang generate invokes
    // to __cxa_atexit.
    CallInst *CI = dyn_cast<CallInst>(*I++);
    if (!CI)
      continue;

    Function *DtorFn =
        dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
    if (!DtorFn)
      continue;

    SmallPtrSet<const Function *, 8> CalledFunctions;
    if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions))
      continue;

    // Just remove the call. __cxa_atexit returns zero on success, so replace
    // any remaining uses of the result with zero.
    CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
    CI->eraseFromParent();

    ++NumCXXDtorsRemoved;

    Changed = true;
  }

  return Changed;
}

bool GlobalOpt::runOnModule(Module &M) {
  bool Changed = false;

  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  DL = DLP ? &DLP->getDataLayout() : 0;
  TLI = &getAnalysis<TargetLibraryInfo>();

  // Try to find the llvm.global_ctors list.
  GlobalVariable *GlobalCtors = FindGlobalCtors(M);

  bool LocalChange = true;
  while (LocalChange) {
    LocalChange = false;

    // Delete functions that are trivially dead and convert internal functions
    // from ccc to fastcc where it is safe to do so.
    LocalChange |= OptimizeFunctions(M);

    // Optimize global_ctors list.
    if (GlobalCtors)
      LocalChange |= OptimizeGlobalCtorsList(GlobalCtors);

    // Optimize non-address-taken globals.
    LocalChange |= OptimizeGlobalVars(M);

    // Resolve aliases, when possible.
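    // An illustrative sketch of that resolution, mirroring the elided IR
    // style used earlier in this file (@impl and @api are invented names):
    //
    //   define internal ... @impl(...)
    //   @api = alias ... @impl
    //
    // can collapse to a single definition named @api that carries the
    // alias's linkage, visibility and DLL storage class.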
    LocalChange |= OptimizeGlobalAliases(M);

    // Try to remove trivial global destructors if they have not been removed
    // already.
    Function *CXAAtExitFn = FindCXAAtExit(M, TLI);
    if (CXAAtExitFn)
      LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);

    Changed |= LocalChange;
  }

  // TODO: Move all global ctors functions to the end of the module for code
  // layout.

  return Changed;
}
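// Usage note (a sketch, assuming the legacy pass manager of this LLVM
// revision): the pass is normally exercised standalone through 'opt', e.g.
//
//   opt -globalopt -stats input.bc -o output.bc
//
// where -stats prints the STATISTIC counters updated above, such as
// NumAliasesResolved, NumAliasesRemoved and NumCXXDtorsRemoved.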