GVN.cpp revision de98568bf8adfbfd3c7437c7cdfa617efc7147f3
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
namespace {
  struct Expression {
    uint32_t opcode;
    const Type *type;
    SmallVector<uint32_t, 4> varargs;

    Expression(uint32_t o = ~2U) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      if (opcode == ~0U || opcode == ~1U)
        return true;
      if (type != other.type)
        return false;
      if (varargs != other.varargs)
        return false;
      return true;
    }
  };

  class ValueTable {
    DenseMap<Value*, uint32_t> valueNumbering;
    DenseMap<Expression, uint32_t> expressionNumbering;
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    uint32_t nextValueNumber;

    Expression create_expression(Instruction* I);
    uint32_t lookup_or_add_call(CallInst* C);
  public:
    ValueTable() : nextValueNumber(1) { }
    uint32_t lookup_or_add(Value *V);
    uint32_t lookup(Value *V) const;
    void add(Value *V, uint32_t num);
    void clear();
    void erase(Value *v);
    void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
    AliasAnalysis *getAliasAnalysis() const { return AA; }
    void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
    void setDomTree(DominatorTree* D) { DT = D; }
    uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
    void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return ~0U;
  }

  static inline Expression getTombstoneKey() {
    return ~1U;
  }

  static unsigned getHashValue(const Expression e) {
    // Hash the opcode, then the type, then each operand value number.
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9)) + hash * 37;

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression ValueTable::create_expression(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookup_or_add(*OI));

  if (CmpInst *C = dyn_cast<CmpInst>(I))
    e.opcode = (C->getOpcode() << 8) | C->getPredicate();
  else if (ExtractValueInst *E = dyn_cast<ExtractValueInst>(I)) {
    for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
         II != IE; ++II)
      e.varargs.push_back(*II);
  } else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) {
    for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
         II != IE; ++II)
      e.varargs.push_back(*II);
  }

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with the local case above.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}
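
// For example, given two calls to the same readonly function whose arguments
// have equal value numbers, where memdep reports the earlier call as the
// unique dominating dependency of the later one, lookup_or_add_call assigns
// both calls the same value number, making the later call redundant.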

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::ExtractValue:
    case Instruction::InsertValue:
    case Instruction::GetElementPtr:
      exp = create_expression(I);
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value.  Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {

  class GVN : public FunctionPass {
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const TargetData *TD;

    ValueTable VN;

    /// LeaderTable - A mapping from value numbers to lists of Value*'s that
    /// have that value number.  Use findLeader to query it.
    struct LeaderTableEntry {
      Value *Val;
      BasicBlock *BB;
      LeaderTableEntry *Next;
    };
    DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
    BumpPtrAllocator TableAllocator;

    SmallVector<Instruction*, 8> InstrsToErase;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
        : FunctionPass(ID), NoLoads(noloads), MD(0) {
      initializeGVNPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F);

    /// markInstructionForDeletion - This removes the specified instruction from
    /// our various maps and marks it for deletion.
    void markInstructionForDeletion(Instruction *I) {
      VN.erase(I);
      InstrsToErase.push_back(I);
    }

    const TargetData *getTargetData() const { return TD; }
    DominatorTree &getDominatorTree() const { return *DT; }
    AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
    MemoryDependenceAnalysis &getMemDep() const { return *MD; }
  private:
    /// addToLeaderTable - Push a new Value to the LeaderTable onto the list for
    /// its value number.
    void addToLeaderTable(uint32_t N, Value *V, BasicBlock *BB) {
      LeaderTableEntry &Curr = LeaderTable[N];
      if (!Curr.Val) {
        Curr.Val = V;
        Curr.BB = BB;
        return;
      }

      LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>();
      Node->Val = V;
      Node->BB = BB;
      Node->Next = Curr.Next;
      Curr.Next = Node;
    }

    /// removeFromLeaderTable - Scan the list of values corresponding to a given
    /// value number, and remove the given value if encountered.
    void removeFromLeaderTable(uint32_t N, Value *V, BasicBlock *BB) {
      LeaderTableEntry* Prev = 0;
      LeaderTableEntry* Curr = &LeaderTable[N];

      while (Curr->Val != V || Curr->BB != BB) {
        Prev = Curr;
        Curr = Curr->Next;
      }

      if (Prev) {
        Prev->Next = Curr->Next;
      } else {
        if (!Curr->Next) {
          Curr->Val = 0;
          Curr->BB = 0;
        } else {
          LeaderTableEntry* Next = Curr->Next;
          Curr->Val = Next->Val;
          Curr->BB = Next->BB;
          Curr->Next = Next->Next;
        }
      }
    }

    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better.
    bool processLoad(LoadInst *L);
    bool processInstruction(Instruction *I);
    bool processNonLocalLoad(LoadInst *L);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*> &d);
    bool iterateOnFunction(Function &F);
    bool performPRE(Function &F);
    Value *findLeader(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully available in FullyAvailableBlocks.
/// Each block in this map holds one of four states:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, the value isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If our speculation was never used to speculate for other blocks, just mark
  // this block unavailable and return.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to 1.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}
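
// Example for IsValueFullyAvailableInBlock: if the value is known available
// only in a loop preheader, the query for the loop header optimistically marks
// the header 2, finds the preheader available (1), and follows the back edge
// around to the header, which is already in the map; the speculation holds and
// the header is reported fully available.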

/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
      TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}
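
// For example, a stored i8* fed to a load of i64 (on a target with 64-bit
// pointers) is handled with a ptrtoint, and a stored i64 fed to a load of i32
// is shifted down (on big-endian targets) and truncated.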

/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  // If this is already the right type, just return it.
  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeStoreSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    // Pointer to pointer -> use bitcast.
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy())
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't must-alias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr, StoreOffset,TD);
  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.  One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all; AA
  // must have gotten confused.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;

  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getValueOperand()->getType()->isStructTy() ||
      DepSI->getValueOperand()->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize =TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

/// AnalyzeLoadFromClobberingLoad - This function is called when we have a
/// memdep query of a load that ends up being clobbered by another load.  See if
/// the other load can feed into the second load.
static int AnalyzeLoadFromClobberingLoad(const Type *LoadTy, Value *LoadPtr,
                                         LoadInst *DepLI, const TargetData &TD){
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
    return -1;

  Value *DepPtr = DepLI->getPointerOperand();
  uint64_t DepSize = TD.getTypeSizeInBits(DepLI->getType());
  int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, TD);
  if (R != -1) return R;

  // If we have a load/load clobber and DepLI can be widened to cover this
  // load, then we should widen it!
  int64_t LoadOffs = 0;
  const Value *LoadBase =
    GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, TD);
  unsigned LoadSize = TD.getTypeStoreSize(LoadTy);

  unsigned Size = MemoryDependenceAnalysis::
    getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, TD);
  if (Size == 0) return -1;

  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, TD);
}

/// AnalyzeLoadFromClobberingMemInst - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic
/// (memset, memcpy, memmove).
static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &TD));
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}

/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store provides bits used by the load but the pointers don't
/// must-alias.  Check this case to see if there is anything more we can do
/// before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}
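
// For example, on a little-endian target, forwarding "store i32 0xAABBCCDD"
// to a load of i8 at byte offset 2 shifts the stored value right by 16 bits
// and truncates it to i8, yielding 0xBB.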

/// GetLoadValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering load.  This means
/// that the load *may* provide bits used by the load but we can't be sure
/// because the pointers don't must-alias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
                                  const Type *LoadTy, Instruction *InsertPt,
                                  GVN &gvn) {
  const TargetData &TD = *gvn.getTargetData();
  // If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
  // widen SrcVal out to a larger load.
  unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType());
  unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
  if (Offset+LoadSize > SrcValSize) {
    assert(!SrcVal->isVolatile() && "Cannot widen volatile load!");
    assert(isa<IntegerType>(SrcVal->getType())&&"Can't widen non-integer load");
    // If we have a load/load clobber and DepLI can be widened to cover this
    // load, then we should widen it to the next power of 2 size big enough!
    unsigned NewLoadSize = Offset+LoadSize;
    if (!isPowerOf2_32(NewLoadSize))
      NewLoadSize = NextPowerOf2(NewLoadSize);

    Value *PtrVal = SrcVal->getPointerOperand();

    // Insert the new load after the old load.  This ensures that subsequent
    // memdep queries will find the new load.  We can't easily remove the old
    // load completely because it is already in the value numbering table.
    IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal));
    const Type *DestPTy =
      IntegerType::get(LoadTy->getContext(), NewLoadSize*8);
    DestPTy = PointerType::get(DestPTy,
                       cast<PointerType>(PtrVal->getType())->getAddressSpace());
    Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc());
    PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
    LoadInst *NewLoad = Builder.CreateLoad(PtrVal);
    NewLoad->takeName(SrcVal);
    NewLoad->setAlignment(SrcVal->getAlignment());

    DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
    DEBUG(dbgs() << "TO: " << *NewLoad << "\n");

    // Replace uses of the original load with the wider load.  On a big endian
    // system, we need to shift down to get the relevant bits.
    Value *RV = NewLoad;
    if (TD.isBigEndian())
      RV = Builder.CreateLShr(RV,
                    NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits());
    RV = Builder.CreateTrunc(RV, SrcVal->getType());
    SrcVal->replaceAllUsesWith(RV);

    // We would like to use gvn.markInstructionForDeletion here, but we can't
    // because the load is already memoized into the leader map table that GVN
    // tracks.  It is potentially possible to remove the load from the table,
    // but then all of the operations based on it would need to be rehashed.
    // Just leave the dead load around.
    gvn.getMemDep().removeInstruction(SrcVal);
    SrcVal = NewLoad;
  }

  return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
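    // For example, a memset of the byte 0x41 feeding a 4-byte load produces
    // the 32-bit splat 0x41414141, built below by repeatedly shifting and
    // OR'ing the value into itself.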
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // See if we can constant fold a load from the constant with the offset
  // applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}

namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    LoadVal,    // A value produced by a load.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 2, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getLoad(BasicBlock *BB, LoadInst *LI,
                                       unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(LI);
    Res.Val.setInt(LoadVal);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
  bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val.getPointer());
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy, GVN &gvn) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        const TargetData *TD = gvn.getTargetData();
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else if (isCoercedLoadValue()) {
      LoadInst *Load = getCoercedLoadValue();
      if (Load->getType() == LoadTy && Offset == 0) {
        Res = Load;
      } else {
        Res = GetLoadValueForLoad(Load, Offset, LoadTy, BB->getTerminator(),
                                  gvn);

        DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << " "
                     << *getCoercedLoadValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      const TargetData *TD = gvn.getTargetData();
      assert(TD && "Need target data to handle type mismatch case");
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << " " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

} // end anonymous namespace

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     GVN &gvn) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), gvn);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, gvn));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy()) {
    AliasAnalysis *AA = gvn.getAliasAnalysis();

    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

    // Now that we've copied information to the new PHIs, scan through
    // them again and inform alias analysis that we've added potentially
    // escaping uses to any values that are operands to these PHIs.
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i) {
      PHINode *P = NewPHIs[i];
      for (unsigned ii = 0, ee = P->getNumIncomingValues(); ii != ee; ++ii)
        AA->addEscapingUse(P->getOperandUse(2*ii));
    }
  }

  return V;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  AliasAnalysis::Location Loc = VN.getAliasAnalysis()->getLocation(LI);
  MD->getNonLocalPointerDependency(Loc, true, LI->getParent(), Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred dependencies, this load isn't
  // worth worrying about.  Optimizing it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber() &&
      Deps[0].getResult().getInst()->getParent() == LI->getParent()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in ValuesPerBlock, and of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
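  // For example, a predecessor that stores a value of the right width to the
  // same address contributes an entry to ValuesPerBlock, while a predecessor
  // containing a call that may write to the pointer ends up in
  // UnavailableBlocks.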
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same as
      // the pointer operand of the load if PHI translation occurs.  Make sure
      // to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from the
      // stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                       DepSI->getValueOperand(),
                                                                Offset));
            continue;
          }
        }
      }

      // Check to see if we have something like this:
      //    load i32* P
      //    load i8* (P+1)
      // if we have this, replace the later with an extraction from the former.
      if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
        // If this is a clobber and L is the first instruction in its block, then
        // we have the first instruction in the entry block.
        if (DepLI != LI && Address && TD) {
          int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(),
                                                     LI->getPointerOperand(),
                                                     DepLI, *TD);

          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI,
                                                                    Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getValueOperand()->getType() != LI->getType()) {
        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                         S->getValueOperand()));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        // If the previously loaded value is as large as or larger than the
        // value we want, we can reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(LI);
    ++NumGVNLoad;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated.  Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions.  We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
    }
  }
  if (!NeedToSplit.empty()) {
    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
    return false;
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
       E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary.  The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getPointerOperand(), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here.  We have to watch out for:
    //  @1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load @1
    // It is valid to have the getelementptr before the test, even if p can
    // be 0, as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case.  Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty()) {
      Instruction *I = NewInsts.pop_back_val();
      if (MD) MD->removeInstruction(I);
      I->eraseFromParent();
    }
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors.
  // Do it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
       E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Instruction *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                        LI->getAlignment(),
                                        UnavailablePred->getTerminator());

    // Transfer the old load's TBAA tag to the new load.
    if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa))
      NewLoad->setMetadata(LLVMContext::MD_tbaa, Tag);

    // Transfer DebugLoc.
    NewLoad->setDebugLoc(LI->getDebugLoc());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  markInstructionForDeletion(LI);
  ++NumPRELoad;
  return true;
}
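
// For example, in a diamond CFG where only the left predecessor stores to the
// pointer, processNonLocalLoad inserts a reload in the right predecessor and
// replaces the load at the merge point with a phi of the stored value and the
// new load.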

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // Find out whether this load has a local dependency, i.e. whether the
  // pointer has been stored to or loaded from before within this block.
  MemDepResult Dep = MD->getDependency(L);

  // If we have a clobber and target data is around, see if this is a clobber
  // that we can fix up through code synthesis.
  if (Dep.isClobber() && TD) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We could do that by recognizing if the clobber instructions are obviously
    // a common base + constant offset, and if the previous store (or memset)
    // completely covers this load.  This sort of thing can happen in bitfield
    // access code.
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) {
      int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                  L->getPointerOperand(),
                                                  DepSI, *TD);
      if (Offset != -1)
        AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
                                        L->getType(), L, *TD);
    }

    // Check to see if we have something like this:
    //   load i32* P
    //   load i8* (P+1)
    // If we have this, replace the latter with an extraction from the former.
    if (LoadInst *DepLI = dyn_cast<LoadInst>(Dep.getInst())) {
      // If this is a clobber and L is the first instruction in its block, then
      // we have the first instruction in the entry block.
      if (DepLI == L)
        return false;

      int Offset = AnalyzeLoadFromClobberingLoad(L->getType(),
                                                 L->getPointerOperand(),
                                                 DepLI, *TD);
      if (Offset != -1)
        AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this);
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can forward
    // a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepMI, *TD);
      if (Offset != -1)
        AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *TD);
    }

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
                   << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      markInstructionForDeletion(L);
      ++NumGVNLoad;
      return true;
    }
  }

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    DEBUG(
      // Fast print dep; using operator<< on instruction is too slow.
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getValueOperand();

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    if (StoredVal->getType() != L->getType()) {
      if (TD) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else
        return false;
    }
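
    // For illustration only (hypothetical IR): given the must-aliased pair
    //   store i32 %v, i32* %p
    //   %f = load float* %p
    // the coercion above replaces the reload with a same-width conversion of
    // the stored value, e.g. "%f = bitcast i32 %v to float".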

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    if (DepLI->getType() != L->getType()) {
      if (TD) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (DepLI->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(DepLI);
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading from a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load occurs right after a lifetime begin, then the loaded value
  // is undefined.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      markInstructionForDeletion(L);
      ++NumGVNLoad;
      return true;
    }
  }

  return false;
}

// findLeader - In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that number,
// and then scan the list to find one whose block dominates the block in
// question.  This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
Value *GVN::findLeader(BasicBlock *BB, uint32_t num) {
  LeaderTableEntry Vals = LeaderTable[num];
  if (!Vals.Val) return 0;

  Value *Val = 0;
  if (DT->dominates(Vals.BB, BB)) {
    Val = Vals.Val;
    if (isa<Constant>(Val)) return Val;
  }

  LeaderTableEntry* Next = Vals.Next;
  while (Next) {
    if (DT->dominates(Next->BB, BB)) {
      if (isa<Constant>(Next->Val)) return Next->Val;
      if (!Val) Val = Next->Val;
    }

    Next = Next->Next;
  }

  return Val;
}
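
// For illustration only (hypothetical values): if value number 7 has leader
// entries (%x, %bb1) and (i32 42, %bb2), then findLeader(%bb3, 7) returns the
// constant 42 whenever %bb2 dominates %bb3, since constants are preferred over
// instructions; otherwise it returns %x if %bb1 dominates %bb3, and null if
// neither block does.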

/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  // If the instruction can be easily simplified then do so now in preference
  // to value numbering it.  Value numbering often exposes redundancies, for
  // example if it determines that %y is equal to %x then the instruction
  // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
  if (Value *V = SimplifyInstruction(I, TD, DT)) {
    I->replaceAllUsesWith(V);
    if (MD && V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(I);
    return true;
  }

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (processLoad(LI))
      return true;

    unsigned Num = VN.lookup_or_add(LI);
    addToLeaderTable(Num, LI, LI->getParent());
    return false;
  }

  // For conditional branches, we can perform simple conditional propagation on
  // the condition value itself.
  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      addToLeaderTable(CondVN,
                       ConstantInt::getTrue(TrueSucc->getContext()),
                       TrueSucc);
    if (FalseSucc->getSinglePredecessor())
      addToLeaderTable(CondVN,
                       ConstantInt::getFalse(FalseSucc->getContext()),
                       FalseSucc);

    return false;
  }

  // Instructions with void type don't return a value, so there's
  // no point in trying to find redundancies in them.
  if (I->getType()->isVoidTy()) return false;

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  // Allocations, terminators and PHI nodes are always uniquely numbered, so we
  // can save time and memory by failing them fast.
  if (isa<AllocaInst>(I) || isa<TerminatorInst>(I) || isa<PHINode>(I)) {
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  if (Num == NextNum) {
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  Value *repl = findLeader(I->getParent(), Num);
  if (repl == 0) {
    // Failure, just remember this instance for future use.
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // Remove it!
  I->replaceAllUsesWith(repl);
  if (MD && repl->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(repl);
  markInstructionForDeletion(I);
  return true;
}
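
// For illustration only (hypothetical IR): after "br i1 %c, label %t, label
// %f", where %t has no other predecessors, the conditional propagation in
// processInstruction records "true" as the leader for %c's value number in %t.
// A redundant recomputation of %c inside %t (e.g. the same icmp) is then
// replaced by the constant true via findLeader.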

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  TD = getAnalysisIfAvailable<TargetData>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI++;

    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;
  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}


bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function
  // (and incrementing BI before processing an instruction).
  assert(InstrsToErase.empty() &&
         "We expect InstrsToErase to be empty across iterations");
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI);
    if (InstrsToErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += InstrsToErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVector<Instruction*, 4>::iterator I = InstrsToErase.begin(),
         E = InstrsToErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    InstrsToErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      // We don't currently value number ANY inline asm calls.
      if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
        if (CallI->isInlineAsm())
          continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look for the predecessors for PRE opportunities.  We're only trying to
      // solve the basic diamond case, where a value is computed in the
      // successor and one predecessor, but not the other.  We also explicitly
      // disallow cases where the successor is its own predecessor, because
      // they're more complicated to get right.
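      //
      // For illustration only (hypothetical CFG): in the diamond
      //
      //          entry
      //          /   \
      //      pred1   pred2
      //          \   /
      //      CurrentBlock
      //
      // if "%v = add i32 %a, %b" is available in pred1 but not in pred2, PRE
      // clones the add into pred2 and merges the two copies with a phi in
      // CurrentBlock, making the original add fully redundant.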
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (P == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!DT->dominates(&F.getEntryBlock(), P)) {
          NumWithout = 2;
          break;
        }

        Value* predV = findLeader(P, ValNo);
        if (predV == 0) {
          PREPred = P;
          ++NumWithout;
        } else if (predV == CurInst) {
          NumWithout = 2;
        } else {
          predMap[P] = predV;
          ++NumWith;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = findLeader(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      PREInstr->setDebugLoc(CurInst->getDebugLoc());
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      ++NumGVNPRE;

      // Update the availability map to include the new instruction.
      addToLeaderTable(ValNo, PREInstr, PREPred);

      // Create a PHI to make the value available in this block.
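      // For illustration only (hypothetical names), the phi created below
      // looks like:
      //   %v.pre-phi = phi i32 [ %v1, %pred1 ], [ %v.pre, %pred2 ]
      // where %v1 is the leader found in pred1 and %v.pre is the clone
      // inserted above.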
      pred_iterator PB = pred_begin(CurrentBlock), PE = pred_end(CurrentBlock);
      PHINode* Phi = PHINode::Create(CurInst->getType(), std::distance(PB, PE),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = PB; PI != PE; ++PI) {
        BasicBlock *P = *PI;
        Phi->addIncoming(predMap[P], P);
      }

      VN.add(Phi, ValNo);
      addToLeaderTable(ValNo, Phi, CurrentBlock);
      Phi->setDebugLoc(CurInst->getDebugLoc());
      CurInst->replaceAllUsesWith(Phi);
      if (Phi->getType()->isPointerTy()) {
        // Because we have added a PHI-use of the pointer value, it has now
        // "escaped" from alias analysis' perspective.  We need to inform
        // AA of this.
        for (unsigned ii = 0, ee = Phi->getNumIncomingValues(); ii != ee; ++ii)
          VN.getAliasAnalysis()->addEscapingUse(Phi->getOperandUse(2*ii));

        if (MD)
          MD->invalidateCachedPointerInfo(Phi);
      }
      VN.erase(CurInst);
      removeFromLeaderTable(ValNo, CurInst, CurrentBlock);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}

/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();
  LeaderTable.clear();
  TableAllocator.Reset();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator
       I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) {
    const LeaderTableEntry *Node = &I->second;
    assert(Node->Val != Inst && "Inst still in value numbering scope!");

    while (Node->Next) {
      Node = Node->Next;
      assert(Node->Val != Inst && "Inst still in value numbering scope!");
    }
  }
}