GVN.cpp revision 06c6791742dfdd54b51441589e1c0921f9851675
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Metadata.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl,  "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

// Maximum allowed recursion depth.
static cl::opt<uint32_t>
MaxRecurseDepth("max-recurse-depth", cl::Hidden, cl::init(1000), cl::ZeroOrMore,
                cl::desc("Max recurse depth (default = 1000)"));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers. It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
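///
/// A minimal illustration (not code from this file): after numbering
///   %a = add i32 %x, %y
///   %b = add i32 %y, %x
/// both %a and %b receive the same value number, because create_expression
/// below sorts the operands of commutative instructions before Expressions
/// are compared.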
namespace {
  struct Expression {
    uint32_t opcode;
    Type *type;
    SmallVector<uint32_t, 4> varargs;

    Expression(uint32_t o = ~2U) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      if (opcode == ~0U || opcode == ~1U)
        return true;
      if (type != other.type)
        return false;
      if (varargs != other.varargs)
        return false;
      return true;
    }

    friend hash_code hash_value(const Expression &Value) {
      return hash_combine(Value.opcode, Value.type,
                          hash_combine_range(Value.varargs.begin(),
                                             Value.varargs.end()));
    }
  };

  class ValueTable {
    DenseMap<Value*, uint32_t> valueNumbering;
    DenseMap<Expression, uint32_t> expressionNumbering;
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    uint32_t nextValueNumber;

    Expression create_expression(Instruction* I);
    Expression create_cmp_expression(unsigned Opcode,
                                     CmpInst::Predicate Predicate,
                                     Value *LHS, Value *RHS);
    Expression create_extractvalue_expression(ExtractValueInst* EI);
    uint32_t lookup_or_add_call(CallInst* C);
  public:
    ValueTable() : nextValueNumber(1) { }
    uint32_t lookup_or_add(Value *V);
    uint32_t lookup(Value *V) const;
    uint32_t lookup_or_add_cmp(unsigned Opcode, CmpInst::Predicate Pred,
                               Value *LHS, Value *RHS);
    void add(Value *V, uint32_t num);
    void clear();
    void erase(Value *v);
    void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
    AliasAnalysis *getAliasAnalysis() const { return AA; }
    void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
    void setDomTree(DominatorTree* D) { DT = D; }
    uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
    void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return ~0U;
  }

  static inline Expression getTombstoneKey() {
    return ~1U;
  }

  static unsigned getHashValue(const Expression e) {
    using llvm::hash_value;
    return static_cast<unsigned>(hash_value(e));
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression ValueTable::create_expression(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookup_or_add(*OI));
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since all commutative instructions have two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
  }

  if (CmpInst *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
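    // For example: if %x is numbered 1 and %y is numbered 2, then
    // "icmp sgt i32 %y, %x" canonicalizes to varargs [1, 2] with the swapped
    // predicate slt, matching "icmp slt i32 %x, %y" exactly.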
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    e.opcode = (C->getOpcode() << 8) | Predicate;
  } else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) {
    for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
         II != IE; ++II)
      e.varargs.push_back(*II);
  }

  return e;
}

Expression ValueTable::create_cmp_expression(unsigned Opcode,
                                             CmpInst::Predicate Predicate,
                                             Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookup_or_add(LHS));
  e.varargs.push_back(lookup_or_add(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  return e;
}

Expression ValueTable::create_extractvalue_expression(ExtractValueInst *EI) {
  assert(EI != 0 && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

  IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
  if (I != 0 && EI->getNumIndices() == 1 && *EI->idx_begin() == 0 ) {
    // EI might be an extract from one of our recognised intrinsics. If it
    // is we'll synthesize a semantically equivalent expression instead of
    // an extractvalue expression.
    switch (I->getIntrinsicID()) {
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::uadd_with_overflow:
        e.opcode = Instruction::Add;
        break;
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::usub_with_overflow:
        e.opcode = Instruction::Sub;
        break;
      case Intrinsic::smul_with_overflow:
      case Intrinsic::umul_with_overflow:
        e.opcode = Instruction::Mul;
        break;
      default:
        break;
    }

    if (e.opcode != 0) {
      // Intrinsic recognized. Grab its args to finish building the expression.
      assert(I->getNumArgOperands() == 2 &&
             "Expect two args for recognised intrinsics.");
      e.varargs.push_back(lookup_or_add(I->getArgOperand(0)));
      e.varargs.push_back(lookup_or_add(I->getArgOperand(1)));
      return e;
    }
  }

  // Not a recognised intrinsic. Fall back to producing an extractvalue
  // expression.
  e.opcode = EI->getOpcode();
  for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookup_or_add(*OI));

  for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
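/// Note that if V already has a number, the existing mapping is left
/// untouched, since DenseMap::insert does not overwrite present entries.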
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-definitions. If we already have a call, reject
      // instruction dependencies.
      if (!I->getResult().isDef() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
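/// Non-instructions (arguments, constants, globals) always get a fresh number
/// on first sight. Handled instruction opcodes are first canonicalized into an
/// Expression, so syntactically different but equivalent instructions share a
/// number; anything else also gets a fresh number.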
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or :
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
    case Instruction::GetElementPtr:
      exp = create_expression(I);
      break;
    case Instruction::ExtractValue:
      exp = create_extractvalue_expression(cast<ExtractValueInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// lookup_or_add_cmp - Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
uint32_t ValueTable::lookup_or_add_cmp(unsigned Opcode,
                                       CmpInst::Predicate Predicate,
                                       Value *LHS, Value *RHS) {
  Expression exp = create_cmp_expression(Opcode, Predicate, LHS, RHS);
  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  return e;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {

  class GVN : public FunctionPass {
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const TargetData *TD;
    const TargetLibraryInfo *TLI;

    ValueTable VN;

    /// LeaderTable - A mapping from value numbers to lists of Value*'s that
    /// have that value number. Use findLeader to query it.
    struct LeaderTableEntry {
      Value *Val;
      BasicBlock *BB;
      LeaderTableEntry *Next;
    };
    DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
    BumpPtrAllocator TableAllocator;

    SmallVector<Instruction*, 8> InstrsToErase;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
      : FunctionPass(ID), NoLoads(noloads), MD(0) {
      initializeGVNPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F);

    /// markInstructionForDeletion - This removes the specified instruction
    /// from our various maps and marks it for deletion.
    void markInstructionForDeletion(Instruction *I) {
      VN.erase(I);
      InstrsToErase.push_back(I);
    }

    const TargetData *getTargetData() const { return TD; }
    DominatorTree &getDominatorTree() const { return *DT; }
    AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
    MemoryDependenceAnalysis &getMemDep() const { return *MD; }
  private:
    /// addToLeaderTable - Push a new Value to the LeaderTable onto the list
    /// for its value number.
    void addToLeaderTable(uint32_t N, Value *V, BasicBlock *BB) {
      LeaderTableEntry &Curr = LeaderTable[N];
      if (!Curr.Val) {
        Curr.Val = V;
        Curr.BB = BB;
        return;
      }

      LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>();
      Node->Val = V;
      Node->BB = BB;
      Node->Next = Curr.Next;
      Curr.Next = Node;
    }

    /// removeFromLeaderTable - Scan the list of values corresponding to a
    /// given value number, and remove the given instruction if encountered.
    void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
      LeaderTableEntry* Prev = 0;
      LeaderTableEntry* Curr = &LeaderTable[N];

      while (Curr->Val != I || Curr->BB != BB) {
        Prev = Curr;
        Curr = Curr->Next;
      }

      if (Prev) {
        Prev->Next = Curr->Next;
      } else {
        if (!Curr->Next) {
          Curr->Val = 0;
          Curr->BB = 0;
        } else {
          LeaderTableEntry* Next = Curr->Next;
          Curr->Val = Next->Val;
          Curr->BB = Next->BB;
          Curr->Next = Next->Next;
        }
      }
    }

    // List of critical edges to be split between iterations.
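    // (These are recorded by processNonLocalLoad and actually split by
    // splitCriticalEdges() between GVN iterations, so the CFG stays stable
    // while a single iteration is in flight.)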
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<TargetLibraryInfo>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }


    // Helper functions.
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst *L);
    bool processInstruction(Instruction *I);
    bool processNonLocalLoad(LoadInst *L);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*> &d);
    bool iterateOnFunction(Function &F);
    bool performPRE(Function &F);
    Value *findLeader(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
    unsigned replaceAllDominatedUsesWith(Value *From, Value *To,
                                         BasicBlock *Root);
    bool propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root);
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
         E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully available in FullyAvailableBlocks.
/// This map holds one of four states for each block:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks,
                            uint32_t RecurseDepth) {
  if (RecurseDepth > MaxRecurseDepth)
    return false;

  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
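  // Note on failure handling: the insert above marked BB as "2"
  // (speculatively available). If the predecessor walk below fails,
  // SpeculationFailure clears BB back to 0, and when BB's optimism was
  // already consumed by other queries (state 3) it conservatively re-marks
  // BB's transitive successors as unavailable as well.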
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either. Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks,RecurseDepth+1))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect. Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks. This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue; // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
      TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}


/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory,
/// and then a load from a must-aliased pointer of a different type, try to
/// coerce the stored value. LoadedTy is the type of the load we want to
/// replace and InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  // If this is already the right type, just return it.
  Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    // Pointer to Pointer -> use bitcast.
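    // (e.g. i8* -> i32* of equal pointer width. Mixed pointer/integer cases
    // fall through to the ptrtoint/bitcast/inttoptr sequence below; a stored
    // float feeding an i32 load likewise ends in a single bitcast.)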
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy())
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it. If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove). This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up. This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
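  // (Worked example for the offset math below: a 4-byte store at base+4
  // clobbering a 1-byte load at base+6 contains the load completely, so this
  // returns LoadOffset - StoreOffset == 2, i.e. the load reads byte 2 of the
  // stored value.)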
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr, StoreOffset,TD);
  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias. AA must have gotten confused.
  // FIXME: Study to see if/when this happens. One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load. In this case, they really don't alias at all, AA
  // must have gotten confused.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it. We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation. Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getValueOperand()->getType()->isStructTy() ||
      DepSI->getValueOperand()->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize =TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

/// AnalyzeLoadFromClobberingLoad - This function is called when we have a
/// memdep query of a load that ends up being clobbered by another load. See if
/// the other load can feed into the second load.
static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
                                         LoadInst *DepLI, const TargetData &TD){
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
    return -1;

  Value *DepPtr = DepLI->getPointerOperand();
  uint64_t DepSize = TD.getTypeSizeInBits(DepLI->getType());
  int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, TD);
  if (R != -1) return R;

  // If we have a load/load clobber and DepLI can be widened to cover this
  // load, then we should widen it!
  int64_t LoadOffs = 0;
  const Value *LoadBase =
    GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, TD);
  unsigned LoadSize = TD.getTypeStoreSize(LoadTy);

  unsigned Size = MemoryDependenceAnalysis::
    getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, TD);
  if (Size == 0) return -1;

  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, TD);
}



static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory. In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &TD));
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}


/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store. This means
/// that the store provides bits used by the load but the pointers don't
/// mustalias. Check this case to see if there is anything more we can do
/// before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load. Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx));
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt);

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetLoadValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering load. This means
/// that the other load *may* provide bits used by this load but we can't be
/// sure because the pointers don't mustalias. Check this case to see if there
/// is anything more we can do before we give up.
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
                                  Type *LoadTy, Instruction *InsertPt,
                                  GVN &gvn) {
  const TargetData &TD = *gvn.getTargetData();
  // If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
  // widen SrcVal out to a larger load.
  unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType());
  unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
  if (Offset+LoadSize > SrcValSize) {
    assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");
    assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer load");
    // If we have a load/load clobber and DepLI can be widened to cover this
    // load, then we should widen it to the next power of 2 size big enough!
    unsigned NewLoadSize = Offset+LoadSize;
    if (!isPowerOf2_32(NewLoadSize))
      NewLoadSize = NextPowerOf2(NewLoadSize);

    Value *PtrVal = SrcVal->getPointerOperand();

    // Insert the new load after the old load. This ensures that subsequent
    // memdep queries will find the new load. We can't easily remove the old
    // load completely because it is already in the value numbering table.
    IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal));
    Type *DestPTy =
      IntegerType::get(LoadTy->getContext(), NewLoadSize*8);
    DestPTy = PointerType::get(DestPTy,
                      cast<PointerType>(PtrVal->getType())->getAddressSpace());
    Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc());
    PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
    LoadInst *NewLoad = Builder.CreateLoad(PtrVal);
    NewLoad->takeName(SrcVal);
    NewLoad->setAlignment(SrcVal->getAlignment());

    DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
    DEBUG(dbgs() << "TO: " << *NewLoad << "\n");

    // Replace uses of the original load with the wider load. On a big endian
    // system, we need to shift down to get the relevant bits.
    Value *RV = NewLoad;
    if (TD.isBigEndian())
      RV = Builder.CreateLShr(RV,
                    NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits());
    RV = Builder.CreateTrunc(RV, SrcVal->getType());
    SrcVal->replaceAllUsesWith(RV);

    // We would like to use gvn.markInstructionForDeletion here, but we can't
    // because the load is already memoized into the leader map table that GVN
    // tracks. It is potentially possible to remove the load from the table,
    // but then all of the operations based on it would need to be rehashed.
    // Just leave the dead load around.
    gvn.getMemDep().removeInstruction(SrcVal);
    SrcVal = NewLoad;
  }

  return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, TD);
}


/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // See if we can constant fold a load from the constant with the offset
  // applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}

namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    LoadVal,    // A value produced by a load.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 2, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getLoad(BasicBlock *BB, LoadInst *LI,
                                       unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(LI);
    Res.Val.setInt(LoadVal);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
  bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val.getPointer());
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        const TargetData *TD = gvn.getTargetData();
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else if (isCoercedLoadValue()) {
      LoadInst *Load = getCoercedLoadValue();
      if (Load->getType() == LoadTy && Offset == 0) {
        Res = Load;
      } else {
        Res = GetLoadValueForLoad(Load, Offset, LoadTy, BB->getTerminator(),
                                  gvn);

        DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << "  "
                     << *getCoercedLoadValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      const TargetData *TD = gvn.getTargetData();
      assert(TD && "Need target data to handle type mismatch case");
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

} // end anonymous namespace

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     GVN &gvn) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
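  // (An illustrative case: a single available value coming from a store in a
  // block that dominates the load. No PHIs are needed; the coerced value is
  // reused directly.)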
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), gvn);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, gvn));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy()) {
    AliasAnalysis *AA = gvn.getAliasAnalysis();

    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

    // Now that we've copied information to the new PHIs, scan through
    // them again and inform alias analysis that we've added potentially
    // escaping uses to any values that are operands to these PHIs.
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i) {
      PHINode *P = NewPHIs[i];
      for (unsigned ii = 0, ee = P->getNumIncomingValues(); ii != ee; ++ii) {
        unsigned jj = PHINode::getOperandNumForIncomingValue(ii);
        AA->addEscapingUse(P->getOperandUse(jj));
      }
    }
  }

  return V;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  AliasAnalysis::Location Loc = VN.getAliasAnalysis()->getLocation(LI);
  MD->getNonLocalPointerDependency(Loc, true, LI->getParent(), Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about. Optimizing
  // it will be too expensive.
  unsigned NumDeps = Deps.size();
  if (NumDeps > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block. Reject this early.
  if (NumDeps == 1 &&
      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " has unknown dependencies\n";
    );
    return false;
  }

  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in ValuesPerBlock, and also keep track of
  // whether we see dependencies that produce an unknown value for the load
  // (such as a call that could potentially clobber the load).
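  // Each dependency below ends up in one of three buckets: a value we can
  // forward (possibly after offset/type coercion), an undef source such as an
  // alloca or lifetime.start, or an unavailable block that load-PRE may later
  // fill by inserting a load.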
  SmallVector<AvailableValueInBlock, 64> ValuesPerBlock;
  SmallVector<BasicBlock*, 64> UnavailableBlocks;

  for (unsigned i = 0, e = NumDeps; i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (!DepInfo.isDef() && !DepInfo.isClobber()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same
      // as the pointer operand of the load if PHI translation occurs. Make
      // sure to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                      DepSI->getValueOperand(),
                                                                Offset));
            continue;
          }
        }
      }

      // Check to see if we have something like this:
      //    load i32* P
      //    load i8* (P+1)
      // if we have this, replace the later with an extraction from the former.
      if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
        // If this is a clobber and L is the first instruction in its block,
        // then we have the first instruction in the entry block.
        if (DepLI != LI && Address && TD) {
          int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(),
                                                     LI->getPointerOperand(),
                                                     DepLI, *TD);

          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI,
                                                                    Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // DepInfo.isDef() here
    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getValueOperand()->getType() != LI->getType()) {
        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                       S->getValueOperand()));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the
      // load.
      if (LD->getType() != LI->getType()) {
        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value. Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(LI);
    ++NumGVNLoad;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated. Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions. We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
      continue;
    }
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      if (LoadBB->isLandingPad()) {
        DEBUG(dbgs()
              << "COULD NOT PRE LOAD BECAUSE OF LANDING PAD CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
    }
  }

  if (!NeedToSplit.empty()) {
    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
    return false;
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Check if the load can safely be moved to all the unavailable
  // predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary. The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
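    // (PHI translation rewrites the load's pointer in terms of values live in
    // UnavailablePred, e.g. replacing a PHI-defined address with the incoming
    // value on that predecessor's edge.)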
1634
1635   // If all preds have a single successor, we assume it is safe to insert
1636   // the load on the pred, so we can insert code to materialize the
1637   // pointer if it is not available.
1638   PHITransAddr Address(LI->getPointerOperand(), TD);
1639   Value *LoadPtr = 0;
1640   if (allSingleSucc) {
1641     LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
1642                                                 *DT, NewInsts);
1643   } else {
1644     Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
1645     LoadPtr = Address.getAddr();
1646   }
1647
1648   // If we couldn't find or insert a computation of this phi-translated value,
1649   // we fail PRE.
1650   if (LoadPtr == 0) {
1651     DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1652           << *LI->getPointerOperand() << "\n");
1653     CanDoPRE = false;
1654     break;
1655   }
1656
1657   // Make sure it is valid to move this load here. We have to watch out for:
1658   //  @1 = getelementptr (i8* p, ...
1659   //  test p and branch if == 0
1660   //  load @1
1661   // It is valid to have the getelementptr before the test, even if p can
1662   // be 0, as getelementptr only does address arithmetic.
1663   // If we are not pushing the value through any multiple-successor blocks
1664   // we do not have this case. Otherwise, check that the load is safe to
1665   // put anywhere; this can be improved, but should be conservatively safe.
1666   if (!allSingleSucc &&
1667       // FIXME: REEVALUATE THIS.
1668       !isSafeToLoadUnconditionally(LoadPtr,
1669                                    UnavailablePred->getTerminator(),
1670                                    LI->getAlignment(), TD)) {
1671     CanDoPRE = false;
1672     break;
1673   }
1674
1675   I->second = LoadPtr;
1676 }
1677
1678 if (!CanDoPRE) {
1679   while (!NewInsts.empty()) {
1680     Instruction *I = NewInsts.pop_back_val();
1681     if (MD) MD->removeInstruction(I);
1682     I->eraseFromParent();
1683   }
1684   return false;
1685 }
1686
1687 // Okay, we can eliminate this load by inserting a reload in the predecessor
1688 // and using PHI construction to get the value in the other predecessors; do
1689 // it.
1690 DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
1691 DEBUG(if (!NewInsts.empty())
1692         dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
1693                << *NewInsts.back() << '\n');
1694
1695 // Assign value numbers to the new instructions.
1696 for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
1697   // FIXME: We really _ought_ to insert these value numbers into their
1698   // parent's availability map. However, in doing so, we risk getting into
1699   // ordering issues. If a block hasn't been processed yet, we would be
1700   // marking a value as AVAIL-IN, which isn't what we intend.
1701   VN.lookup_or_add(NewInsts[i]);
1702 }
1703
1704 for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
1705      E = PredLoads.end(); I != E; ++I) {
1706   BasicBlock *UnavailablePred = I->first;
1707   Value *LoadPtr = I->second;
1708
1709   Instruction *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
1710                                       LI->getAlignment(),
1711                                       UnavailablePred->getTerminator());
1712
1713   // Transfer the old load's TBAA tag to the new load.
1714   if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa))
1715     NewLoad->setMetadata(LLVMContext::MD_tbaa, Tag);
1716
1717   // Transfer DebugLoc.
1718   NewLoad->setDebugLoc(LI->getDebugLoc());
1719
1720   // Add the newly created load.
1721   ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
1722                                                       NewLoad));
1723   MD->invalidateCachedPointerInfo(LoadPtr);
1724   DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
1725 }
1726
1727 // Perform PHI construction.
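// For example (hypothetical values): with %s available from a store in one
// predecessor and the freshly inserted %ld.pre in the other, this builds
//   %v = phi i32 [ %s, %AvailPred ], [ %ld.pre, %UnavailablePred ]
// and every use of the original load becomes a use of %v.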
1728   Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
1729   LI->replaceAllUsesWith(V);
1730   if (isa<PHINode>(V))
1731     V->takeName(LI);
1732   if (V->getType()->isPointerTy())
1733     MD->invalidateCachedPointerInfo(V);
1734   markInstructionForDeletion(LI);
1735   ++NumPRELoad;
1736   return true;
1737 }
1738
1739 static MDNode *getMostGenericTBAA(MDNode *A, MDNode *B) {
1740   if (!A || !B)
1741     return NULL;
1742
1743   if (A == B)
1744     return A;
1745
1746   SmallVector<MDNode *, 4> PathA;
1747   MDNode *T = A;
1748   while (T) {
1749     PathA.push_back(T);
1750     T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : 0;
1751   }
1752
1753   SmallVector<MDNode *, 4> PathB;
1754   T = B;
1755   while (T) {
1756     PathB.push_back(T);
1757     T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : 0;
1758   }
1759
1760   int IA = PathA.size() - 1;
1761   int IB = PathB.size() - 1;
1762
1763   MDNode *Ret = 0;
1764   while (IA >= 0 && IB >= 0) {
1765     if (PathA[IA] == PathB[IB])
1766       Ret = PathA[IA];
1767     else
1768       break;
1769     --IA;
1770     --IB;
1771   }
1772   return Ret;
1773 }
1774
1775 static MDNode *getMostGenericFPMath(MDNode *A, MDNode *B) {
1776   if (!A || !B)
1777     return NULL;
1778
1779   APFloat AVal = cast<ConstantFP>(A->getOperand(0))->getValueAPF();
1780   APFloat BVal = cast<ConstantFP>(B->getOperand(0))->getValueAPF();
1781   if (AVal.compare(BVal) == APFloat::cmpLessThan)
1782     return A;
1783   return B;
1784 }
1785
1786 static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
1787   return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
1788 }
1789
1790 static bool canBeMerged(const ConstantRange &A, const ConstantRange &B) {
1791   return !A.intersectWith(B).isEmptySet() || isContiguous(A, B);
1792 }
1793
1794 static bool tryMergeRange(SmallVector<Value*, 4> &EndPoints, ConstantInt *Low,
1795                           ConstantInt *High) {
1796   ConstantRange NewRange(Low->getValue(), High->getValue());
1797   unsigned Size = EndPoints.size();
1798   APInt LB = cast<ConstantInt>(EndPoints[Size - 2])->getValue();
1799   APInt LE = cast<ConstantInt>(EndPoints[Size - 1])->getValue();
1800   ConstantRange LastRange(LB, LE);
1801   if (canBeMerged(NewRange, LastRange)) {
1802     ConstantRange Union = LastRange.unionWith(NewRange);
1803     Type *Ty = High->getType();
1804     EndPoints[Size - 2] = ConstantInt::get(Ty, Union.getLower());
1805     EndPoints[Size - 1] = ConstantInt::get(Ty, Union.getUpper());
1806     return true;
1807   }
1808   return false;
1809 }
1810
1811 static void addRange(SmallVector<Value*, 4> &EndPoints, ConstantInt *Low,
1812                      ConstantInt *High) {
1813   if (!EndPoints.empty())
1814     if (tryMergeRange(EndPoints, Low, High))
1815       return;
1816
1817   EndPoints.push_back(Low);
1818   EndPoints.push_back(High);
1819 }
1820
1821 static MDNode *getMostGenericRange(MDNode *A, MDNode *B) {
1822   // Given two ranges, we want to compute the union of the ranges. This
1823   // is slightly complicated by having to combine the intervals and merge
1824   // the ones that overlap.
1825
1826   if (!A || !B)
1827     return NULL;
1828
1829   if (A == B)
1830     return A;
1831
1832   // First, walk both lists in order of the lower boundary of each interval.
1833   // At each step, try to merge the new interval into the last one we added.
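  // Worked example (hypothetical metadata): merging !{i32 0, i32 2, i32 3,
  // i32 5} with !{i32 1, i32 4} visits [0,2), [1,4) and [3,5) in order of
  // lower bound; [1,4) overlaps [0,2) giving [0,4), then [3,5) overlaps that
  // as well, so the result is the single range !{i32 0, i32 5}.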
1834   SmallVector<Value*, 4> EndPoints;
1835   int AI = 0;
1836   int BI = 0;
1837   int AN = A->getNumOperands() / 2;
1838   int BN = B->getNumOperands() / 2;
1839   while (AI < AN && BI < BN) {
1840     ConstantInt *ALow = cast<ConstantInt>(A->getOperand(2 * AI));
1841     ConstantInt *BLow = cast<ConstantInt>(B->getOperand(2 * BI));
1842
1843     if (ALow->getValue().slt(BLow->getValue())) {
1844       addRange(EndPoints, ALow, cast<ConstantInt>(A->getOperand(2 * AI + 1)));
1845       ++AI;
1846     } else {
1847       addRange(EndPoints, BLow, cast<ConstantInt>(B->getOperand(2 * BI + 1)));
1848       ++BI;
1849     }
1850   }
1851   while (AI < AN) {
1852     addRange(EndPoints, cast<ConstantInt>(A->getOperand(2 * AI)),
1853              cast<ConstantInt>(A->getOperand(2 * AI + 1)));
1854     ++AI;
1855   }
1856   while (BI < BN) {
1857     addRange(EndPoints, cast<ConstantInt>(B->getOperand(2 * BI)),
1858              cast<ConstantInt>(B->getOperand(2 * BI + 1)));
1859     ++BI;
1860   }
1861
1862   // If we have more than 2 ranges (4 endpoints), we have to try to merge
1863   // the last and first ones.
1864   unsigned Size = EndPoints.size();
1865   if (Size > 4) {
1866     ConstantInt *FB = cast<ConstantInt>(EndPoints[0]);
1867     ConstantInt *FE = cast<ConstantInt>(EndPoints[1]);
1868     if (tryMergeRange(EndPoints, FB, FE)) {
1869       for (unsigned i = 0; i < Size - 2; ++i) {
1870         EndPoints[i] = EndPoints[i + 2];
1871       }
1872       EndPoints.resize(Size - 2);
1873     }
1874   }
1875
1876   // If in the end we have a single range, it is possible that it is now the
1877   // full range. Just drop the metadata in that case.
1878   if (EndPoints.size() == 2) {
1879     ConstantRange Range(cast<ConstantInt>(EndPoints[0])->getValue(),
1880                         cast<ConstantInt>(EndPoints[1])->getValue());
1881     if (Range.isFullSet())
1882       return NULL;
1883   }
1884
1885   return MDNode::get(A->getContext(), EndPoints);
1886 }
1887
1888 static void patchReplacementInstruction(Value *Repl, Instruction *I) {
1889   // Patch the replacement so that it is not more restrictive than the value
1890   // being replaced.
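  // For example (hypothetical IR): if Repl is "add nsw i32 %x, %y" but the
  // instruction it replaces is a plain "add i32 %x, %y", the nsw flag must
  // be dropped from Repl, since the uses being rewritten never had the
  // no-signed-wrap guarantee.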
1891   BinaryOperator *Op = dyn_cast<BinaryOperator>(I);
1892   BinaryOperator *ReplOp = dyn_cast<BinaryOperator>(Repl);
1893   if (Op && ReplOp && isa<OverflowingBinaryOperator>(Op) &&
1894       isa<OverflowingBinaryOperator>(ReplOp)) {
1895     if (ReplOp->hasNoSignedWrap() && !Op->hasNoSignedWrap())
1896       ReplOp->setHasNoSignedWrap(false);
1897     if (ReplOp->hasNoUnsignedWrap() && !Op->hasNoUnsignedWrap())
1898       ReplOp->setHasNoUnsignedWrap(false);
1899   }
1900   if (Instruction *ReplInst = dyn_cast<Instruction>(Repl)) {
1901     SmallVector<std::pair<unsigned, MDNode*>, 4> Metadata;
1902     ReplInst->getAllMetadataOtherThanDebugLoc(Metadata);
1903     for (int i = 0, n = Metadata.size(); i < n; ++i) {
1904       unsigned Kind = Metadata[i].first;
1905       MDNode *IMD = I->getMetadata(Kind);
1906       MDNode *ReplMD = Metadata[i].second;
1907       switch(Kind) {
1908       default:
1909         ReplInst->setMetadata(Kind, NULL); // Remove unknown metadata
1910         break;
1911       case LLVMContext::MD_dbg:
1912         llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
1913       case LLVMContext::MD_tbaa:
1914         ReplInst->setMetadata(Kind, getMostGenericTBAA(IMD, ReplMD));
1915         break;
1916       case LLVMContext::MD_range:
1917         ReplInst->setMetadata(Kind, getMostGenericRange(IMD, ReplMD));
1918         break;
1919       case LLVMContext::MD_prof:
1920         llvm_unreachable("MD_prof in a non-terminator instruction");
1921         break;
1922       case LLVMContext::MD_fpmath:
1923         ReplInst->setMetadata(Kind, getMostGenericFPMath(IMD, ReplMD));
1924         break;
1925       }
1926     }
1927   }
1928 }
1929
1930 static void patchAndReplaceAllUsesWith(Value *Repl, Instruction *I) {
1931   patchReplacementInstruction(Repl, I);
1932   I->replaceAllUsesWith(Repl);
1933 }
1934
1935 /// processLoad - Attempt to eliminate a load, first by eliminating it
1936 /// locally, and then attempting non-local elimination if that fails.
1937 bool GVN::processLoad(LoadInst *L) {
1938   if (!MD)
1939     return false;
1940
1941   if (!L->isSimple())
1942     return false;
1943
1944   if (L->use_empty()) {
1945     markInstructionForDeletion(L);
1946     return true;
1947   }
1948
1949   // Find the earlier memory operation that this load depends on.
1950   MemDepResult Dep = MD->getDependency(L);
1951
1952   // If we have a clobber and target data is around, see if this is a clobber
1953   // that we can fix up through code synthesis.
1954   if (Dep.isClobber() && TD) {
1955     // Check to see if we have something like this:
1956     //   store i32 123, i32* %P
1957     //   %A = bitcast i32* %P to i8*
1958     //   %B = gep i8* %A, i32 1
1959     //   %C = load i8* %B
1960     //
1961     // We can handle this by recognizing when the load and the clobbering
1962     // store (or memset) share an obvious common base + constant offset, and
1963     // when the earlier store completely covers this load. This sort of thing
1964     // can happen in bitfield access code.
1965     Value *AvailVal = 0;
1966     if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) {
1967       int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
1968                                                   L->getPointerOperand(),
1969                                                   DepSI, *TD);
1970       if (Offset != -1)
1971         AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
1972                                         L->getType(), L, *TD);
1973     }
1974
1975     // Check to see if we have something like this:
1976     //   load i32* P
1977     //   load i8* (P+1)
1978     // if we have this, replace the latter with an extraction from the former.
1979     if (LoadInst *DepLI = dyn_cast<LoadInst>(Dep.getInst())) {
1980       // A clobber dependency on the load itself means memdep scanned all the
1981       // way to the start of the entry block without finding anything: there
1981       // is no earlier load to reuse.
1982 if (DepLI == L) 1983 return false; 1984 1985 int Offset = AnalyzeLoadFromClobberingLoad(L->getType(), 1986 L->getPointerOperand(), 1987 DepLI, *TD); 1988 if (Offset != -1) 1989 AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this); 1990 } 1991 1992 // If the clobbering value is a memset/memcpy/memmove, see if we can forward 1993 // a value on from it. 1994 if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) { 1995 int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(), 1996 L->getPointerOperand(), 1997 DepMI, *TD); 1998 if (Offset != -1) 1999 AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *TD); 2000 } 2001 2002 if (AvailVal) { 2003 DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n' 2004 << *AvailVal << '\n' << *L << "\n\n\n"); 2005 2006 // Replace the load! 2007 L->replaceAllUsesWith(AvailVal); 2008 if (AvailVal->getType()->isPointerTy()) 2009 MD->invalidateCachedPointerInfo(AvailVal); 2010 markInstructionForDeletion(L); 2011 ++NumGVNLoad; 2012 return true; 2013 } 2014 } 2015 2016 // If the value isn't available, don't do anything! 2017 if (Dep.isClobber()) { 2018 DEBUG( 2019 // fast print dep, using operator<< on instruction is too slow. 2020 dbgs() << "GVN: load "; 2021 WriteAsOperand(dbgs(), L); 2022 Instruction *I = Dep.getInst(); 2023 dbgs() << " is clobbered by " << *I << '\n'; 2024 ); 2025 return false; 2026 } 2027 2028 // If it is defined in another block, try harder. 2029 if (Dep.isNonLocal()) 2030 return processNonLocalLoad(L); 2031 2032 if (!Dep.isDef()) { 2033 DEBUG( 2034 // fast print dep, using operator<< on instruction is too slow. 2035 dbgs() << "GVN: load "; 2036 WriteAsOperand(dbgs(), L); 2037 dbgs() << " has unknown dependence\n"; 2038 ); 2039 return false; 2040 } 2041 2042 Instruction *DepInst = Dep.getInst(); 2043 if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) { 2044 Value *StoredVal = DepSI->getValueOperand(); 2045 2046 // The store and load are to a must-aliased pointer, but they may not 2047 // actually have the same type. See if we know how to reuse the stored 2048 // value (depending on its type). 2049 if (StoredVal->getType() != L->getType()) { 2050 if (TD) { 2051 StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(), 2052 L, *TD); 2053 if (StoredVal == 0) 2054 return false; 2055 2056 DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal 2057 << '\n' << *L << "\n\n\n"); 2058 } 2059 else 2060 return false; 2061 } 2062 2063 // Remove it! 2064 L->replaceAllUsesWith(StoredVal); 2065 if (StoredVal->getType()->isPointerTy()) 2066 MD->invalidateCachedPointerInfo(StoredVal); 2067 markInstructionForDeletion(L); 2068 ++NumGVNLoad; 2069 return true; 2070 } 2071 2072 if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) { 2073 Value *AvailableVal = DepLI; 2074 2075 // The loads are of a must-aliased pointer, but they may not actually have 2076 // the same type. See if we know how to reuse the previously loaded value 2077 // (depending on its type). 2078 if (DepLI->getType() != L->getType()) { 2079 if (TD) { 2080 AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), 2081 L, *TD); 2082 if (AvailableVal == 0) 2083 return false; 2084 2085 DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal 2086 << "\n" << *L << "\n\n\n"); 2087 } 2088 else 2089 return false; 2090 } 2091 2092 // Remove it! 
2093     patchAndReplaceAllUsesWith(AvailableVal, L);
2094     if (DepLI->getType()->isPointerTy())
2095       MD->invalidateCachedPointerInfo(DepLI);
2096     markInstructionForDeletion(L);
2097     ++NumGVNLoad;
2098     return true;
2099   }
2100
2101   // If this load really doesn't depend on anything, then we must be loading
2102   // an undef value. This can happen when loading from a fresh allocation
2103   // with no intervening stores, for example.
2104   if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
2105     L->replaceAllUsesWith(UndefValue::get(L->getType()));
2106     markInstructionForDeletion(L);
2107     ++NumGVNLoad;
2108     return true;
2109   }
2110
2111   // If this load occurs right after a lifetime begin intrinsic, then the
2112   // loaded value is undefined.
2113   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
2114     if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
2115       L->replaceAllUsesWith(UndefValue::get(L->getType()));
2116       markInstructionForDeletion(L);
2117       ++NumGVNLoad;
2118       return true;
2119     }
2120   }
2121
2122   return false;
2123 }
2124
2125 // findLeader - In order to find a leader for a given value number at a
2126 // specific basic block, we first obtain the list of all Values for that number,
2127 // and then scan the list to find one whose block dominates the block in
2128 // question. This is fast because dominator tree queries consist of only
2129 // a few comparisons of DFS numbers.
2130 Value *GVN::findLeader(BasicBlock *BB, uint32_t num) {
2131   LeaderTableEntry Vals = LeaderTable[num];
2132   if (!Vals.Val) return 0;
2133
2134   Value *Val = 0;
2135   if (DT->dominates(Vals.BB, BB)) {
2136     Val = Vals.Val;
2137     if (isa<Constant>(Val)) return Val;
2138   }
2139
2140   LeaderTableEntry* Next = Vals.Next;
2141   while (Next) {
2142     if (DT->dominates(Next->BB, BB)) {
2143       if (isa<Constant>(Next->Val)) return Next->Val;
2144       if (!Val) Val = Next->Val;
2145     }
2146
2147     Next = Next->Next;
2148   }
2149
2150   return Val;
2151 }
2152
2153 /// replaceAllDominatedUsesWith - Replace all uses of 'From' with 'To' if the
2154 /// use is dominated by the given basic block. Returns the number of uses that
2155 /// were replaced.
2156 unsigned GVN::replaceAllDominatedUsesWith(Value *From, Value *To,
2157                                           BasicBlock *Root) {
2158   unsigned Count = 0;
2159   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2160        UI != UE; ) {
2161     Use &U = (UI++).getUse();
2162
2163     // If From occurs as a phi node operand then the use implicitly lives in the
2164     // corresponding incoming block. Otherwise it is the block containing the
2165     // user that must be dominated by Root.
2166     BasicBlock *UsingBlock;
2167     if (PHINode *PN = dyn_cast<PHINode>(U.getUser()))
2168       UsingBlock = PN->getIncomingBlock(U);
2169     else
2170       UsingBlock = cast<Instruction>(U.getUser())->getParent();
2171
2172     if (DT->dominates(Root, UsingBlock)) {
2173       U.set(To);
2174       ++Count;
2175     }
2176   }
2177   return Count;
2178 }
2179
2180 /// propagateEquality - The given values are known to be equal in every block
2181 /// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
2182 /// 'RHS' everywhere in the scope. Returns whether a change was made.
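/// For example (hypothetical IR): if "%c = icmp eq i32 %x, 7" controls a
/// conditional branch, then propagating "%c == true" into the true successor
/// lets every use of %x dominated by that edge be rewritten to the constant 7.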
2183bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) { 2184 SmallVector<std::pair<Value*, Value*>, 4> Worklist; 2185 Worklist.push_back(std::make_pair(LHS, RHS)); 2186 bool Changed = false; 2187 2188 while (!Worklist.empty()) { 2189 std::pair<Value*, Value*> Item = Worklist.pop_back_val(); 2190 LHS = Item.first; RHS = Item.second; 2191 2192 if (LHS == RHS) continue; 2193 assert(LHS->getType() == RHS->getType() && "Equality but unequal types!"); 2194 2195 // Don't try to propagate equalities between constants. 2196 if (isa<Constant>(LHS) && isa<Constant>(RHS)) continue; 2197 2198 // Prefer a constant on the right-hand side, or an Argument if no constants. 2199 if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS))) 2200 std::swap(LHS, RHS); 2201 assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!"); 2202 2203 // If there is no obvious reason to prefer the left-hand side over the right- 2204 // hand side, ensure the longest lived term is on the right-hand side, so the 2205 // shortest lived term will be replaced by the longest lived. This tends to 2206 // expose more simplifications. 2207 uint32_t LVN = VN.lookup_or_add(LHS); 2208 if ((isa<Argument>(LHS) && isa<Argument>(RHS)) || 2209 (isa<Instruction>(LHS) && isa<Instruction>(RHS))) { 2210 // Move the 'oldest' value to the right-hand side, using the value number as 2211 // a proxy for age. 2212 uint32_t RVN = VN.lookup_or_add(RHS); 2213 if (LVN < RVN) { 2214 std::swap(LHS, RHS); 2215 LVN = RVN; 2216 } 2217 } 2218 assert((!isa<Instruction>(RHS) || 2219 DT->properlyDominates(cast<Instruction>(RHS)->getParent(), Root)) && 2220 "Instruction doesn't dominate scope!"); 2221 2222 // If value numbering later sees that an instruction in the scope is equal 2223 // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve 2224 // the invariant that instructions only occur in the leader table for their 2225 // own value number (this is used by removeFromLeaderTable), do not do this 2226 // if RHS is an instruction (if an instruction in the scope is morphed into 2227 // LHS then it will be turned into RHS by the next GVN iteration anyway, so 2228 // using the leader table is about compiling faster, not optimizing better). 2229 if (!isa<Instruction>(RHS)) 2230 addToLeaderTable(LVN, RHS, Root); 2231 2232 // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As 2233 // LHS always has at least one use that is not dominated by Root, this will 2234 // never do anything if LHS has only one use. 2235 if (!LHS->hasOneUse()) { 2236 unsigned NumReplacements = replaceAllDominatedUsesWith(LHS, RHS, Root); 2237 Changed |= NumReplacements > 0; 2238 NumGVNEqProp += NumReplacements; 2239 } 2240 2241 // Now try to deduce additional equalities from this one. For example, if the 2242 // known equality was "(A != B)" == "false" then it follows that A and B are 2243 // equal in the scope. Only boolean equalities with an explicit true or false 2244 // RHS are currently supported. 2245 if (!RHS->getType()->isIntegerTy(1)) 2246 // Not a boolean equality - bail out. 2247 continue; 2248 ConstantInt *CI = dyn_cast<ConstantInt>(RHS); 2249 if (!CI) 2250 // RHS neither 'true' nor 'false' - bail out. 2251 continue; 2252 // Whether RHS equals 'true'. Otherwise it equals 'false'. 2253 bool isKnownTrue = CI->isAllOnesValue(); 2254 bool isKnownFalse = !isKnownTrue; 2255 2256 // If "A && B" is known true then both A and B are known true. 
If "A || B" 2257 // is known false then both A and B are known false. 2258 Value *A, *B; 2259 if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) || 2260 (isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) { 2261 Worklist.push_back(std::make_pair(A, RHS)); 2262 Worklist.push_back(std::make_pair(B, RHS)); 2263 continue; 2264 } 2265 2266 // If we are propagating an equality like "(A == B)" == "true" then also 2267 // propagate the equality A == B. When propagating a comparison such as 2268 // "(A >= B)" == "true", replace all instances of "A < B" with "false". 2269 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(LHS)) { 2270 Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1); 2271 2272 // If "A == B" is known true, or "A != B" is known false, then replace 2273 // A with B everywhere in the scope. 2274 if ((isKnownTrue && Cmp->getPredicate() == CmpInst::ICMP_EQ) || 2275 (isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE)) 2276 Worklist.push_back(std::make_pair(Op0, Op1)); 2277 2278 // If "A >= B" is known true, replace "A < B" with false everywhere. 2279 CmpInst::Predicate NotPred = Cmp->getInversePredicate(); 2280 Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse); 2281 // Since we don't have the instruction "A < B" immediately to hand, work out 2282 // the value number that it would have and use that to find an appropriate 2283 // instruction (if any). 2284 uint32_t NextNum = VN.getNextUnusedValueNumber(); 2285 uint32_t Num = VN.lookup_or_add_cmp(Cmp->getOpcode(), NotPred, Op0, Op1); 2286 // If the number we were assigned was brand new then there is no point in 2287 // looking for an instruction realizing it: there cannot be one! 2288 if (Num < NextNum) { 2289 Value *NotCmp = findLeader(Root, Num); 2290 if (NotCmp && isa<Instruction>(NotCmp)) { 2291 unsigned NumReplacements = 2292 replaceAllDominatedUsesWith(NotCmp, NotVal, Root); 2293 Changed |= NumReplacements > 0; 2294 NumGVNEqProp += NumReplacements; 2295 } 2296 } 2297 // Ensure that any instruction in scope that gets the "A < B" value number 2298 // is replaced with false. 2299 addToLeaderTable(Num, NotVal, Root); 2300 2301 continue; 2302 } 2303 } 2304 2305 return Changed; 2306} 2307 2308/// isOnlyReachableViaThisEdge - There is an edge from 'Src' to 'Dst'. Return 2309/// true if every path from the entry block to 'Dst' passes via this edge. In 2310/// particular 'Dst' must not be reachable via another edge from 'Src'. 2311static bool isOnlyReachableViaThisEdge(BasicBlock *Src, BasicBlock *Dst, 2312 DominatorTree *DT) { 2313 // While in theory it is interesting to consider the case in which Dst has 2314 // more than one predecessor, because Dst might be part of a loop which is 2315 // only reachable from Src, in practice it is pointless since at the time 2316 // GVN runs all such loops have preheaders, which means that Dst will have 2317 // been changed to have only one predecessor, namely Src. 2318 BasicBlock *Pred = Dst->getSinglePredecessor(); 2319 assert((!Pred || Pred == Src) && "No edge between these basic blocks!"); 2320 (void)Src; 2321 return Pred != 0; 2322} 2323 2324/// processInstruction - When calculating availability, handle an instruction 2325/// by inserting it into the appropriate sets 2326bool GVN::processInstruction(Instruction *I) { 2327 // Ignore dbg info intrinsics. 2328 if (isa<DbgInfoIntrinsic>(I)) 2329 return false; 2330 2331 // If the instruction can be easily simplified then do so now in preference 2332 // to value numbering it. 
Value numbering often exposes redundancies, for 2333 // example if it determines that %y is equal to %x then the instruction 2334 // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify. 2335 if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) { 2336 I->replaceAllUsesWith(V); 2337 if (MD && V->getType()->isPointerTy()) 2338 MD->invalidateCachedPointerInfo(V); 2339 markInstructionForDeletion(I); 2340 ++NumGVNSimpl; 2341 return true; 2342 } 2343 2344 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 2345 if (processLoad(LI)) 2346 return true; 2347 2348 unsigned Num = VN.lookup_or_add(LI); 2349 addToLeaderTable(Num, LI, LI->getParent()); 2350 return false; 2351 } 2352 2353 // For conditional branches, we can perform simple conditional propagation on 2354 // the condition value itself. 2355 if (BranchInst *BI = dyn_cast<BranchInst>(I)) { 2356 if (!BI->isConditional() || isa<Constant>(BI->getCondition())) 2357 return false; 2358 2359 Value *BranchCond = BI->getCondition(); 2360 2361 BasicBlock *TrueSucc = BI->getSuccessor(0); 2362 BasicBlock *FalseSucc = BI->getSuccessor(1); 2363 BasicBlock *Parent = BI->getParent(); 2364 bool Changed = false; 2365 2366 if (isOnlyReachableViaThisEdge(Parent, TrueSucc, DT)) 2367 Changed |= propagateEquality(BranchCond, 2368 ConstantInt::getTrue(TrueSucc->getContext()), 2369 TrueSucc); 2370 2371 if (isOnlyReachableViaThisEdge(Parent, FalseSucc, DT)) 2372 Changed |= propagateEquality(BranchCond, 2373 ConstantInt::getFalse(FalseSucc->getContext()), 2374 FalseSucc); 2375 2376 return Changed; 2377 } 2378 2379 // For switches, propagate the case values into the case destinations. 2380 if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) { 2381 Value *SwitchCond = SI->getCondition(); 2382 BasicBlock *Parent = SI->getParent(); 2383 bool Changed = false; 2384 for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); 2385 i != e; ++i) { 2386 BasicBlock *Dst = i.getCaseSuccessor(); 2387 if (isOnlyReachableViaThisEdge(Parent, Dst, DT)) 2388 Changed |= propagateEquality(SwitchCond, i.getCaseValue(), Dst); 2389 } 2390 return Changed; 2391 } 2392 2393 // Instructions with void type don't return a value, so there's 2394 // no point in trying to find redundancies in them. 2395 if (I->getType()->isVoidTy()) return false; 2396 2397 uint32_t NextNum = VN.getNextUnusedValueNumber(); 2398 unsigned Num = VN.lookup_or_add(I); 2399 2400 // Allocations are always uniquely numbered, so we can save time and memory 2401 // by fast failing them. 2402 if (isa<AllocaInst>(I) || isa<TerminatorInst>(I) || isa<PHINode>(I)) { 2403 addToLeaderTable(Num, I, I->getParent()); 2404 return false; 2405 } 2406 2407 // If the number we were assigned was a brand new VN, then we don't 2408 // need to do a lookup to see if the number already exists 2409 // somewhere in the domtree: it can't! 2410 if (Num >= NextNum) { 2411 addToLeaderTable(Num, I, I->getParent()); 2412 return false; 2413 } 2414 2415 // Perform fast-path value-number based elimination of values inherited from 2416 // dominators. 2417 Value *repl = findLeader(I->getParent(), Num); 2418 if (repl == 0) { 2419 // Failure, just remember this instance for future use. 2420 addToLeaderTable(Num, I, I->getParent()); 2421 return false; 2422 } 2423 2424 // Remove it! 
2425 patchAndReplaceAllUsesWith(repl, I); 2426 if (MD && repl->getType()->isPointerTy()) 2427 MD->invalidateCachedPointerInfo(repl); 2428 markInstructionForDeletion(I); 2429 return true; 2430} 2431 2432/// runOnFunction - This is the main transformation entry point for a function. 2433bool GVN::runOnFunction(Function& F) { 2434 if (!NoLoads) 2435 MD = &getAnalysis<MemoryDependenceAnalysis>(); 2436 DT = &getAnalysis<DominatorTree>(); 2437 TD = getAnalysisIfAvailable<TargetData>(); 2438 TLI = &getAnalysis<TargetLibraryInfo>(); 2439 VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>()); 2440 VN.setMemDep(MD); 2441 VN.setDomTree(DT); 2442 2443 bool Changed = false; 2444 bool ShouldContinue = true; 2445 2446 // Merge unconditional branches, allowing PRE to catch more 2447 // optimization opportunities. 2448 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) { 2449 BasicBlock *BB = FI++; 2450 2451 bool removedBlock = MergeBlockIntoPredecessor(BB, this); 2452 if (removedBlock) ++NumGVNBlocks; 2453 2454 Changed |= removedBlock; 2455 } 2456 2457 unsigned Iteration = 0; 2458 while (ShouldContinue) { 2459 DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n"); 2460 ShouldContinue = iterateOnFunction(F); 2461 if (splitCriticalEdges()) 2462 ShouldContinue = true; 2463 Changed |= ShouldContinue; 2464 ++Iteration; 2465 } 2466 2467 if (EnablePRE) { 2468 bool PREChanged = true; 2469 while (PREChanged) { 2470 PREChanged = performPRE(F); 2471 Changed |= PREChanged; 2472 } 2473 } 2474 // FIXME: Should perform GVN again after PRE does something. PRE can move 2475 // computations into blocks where they become fully redundant. Note that 2476 // we can't do this until PRE's critical edge splitting updates memdep. 2477 // Actually, when this happens, we should just fully integrate PRE into GVN. 2478 2479 cleanupGlobalSets(); 2480 2481 return Changed; 2482} 2483 2484 2485bool GVN::processBlock(BasicBlock *BB) { 2486 // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function 2487 // (and incrementing BI before processing an instruction). 2488 assert(InstrsToErase.empty() && 2489 "We expect InstrsToErase to be empty across iterations"); 2490 bool ChangedFunction = false; 2491 2492 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); 2493 BI != BE;) { 2494 ChangedFunction |= processInstruction(BI); 2495 if (InstrsToErase.empty()) { 2496 ++BI; 2497 continue; 2498 } 2499 2500 // If we need some instructions deleted, do it now. 2501 NumGVNInstr += InstrsToErase.size(); 2502 2503 // Avoid iterator invalidation. 2504 bool AtStart = BI == BB->begin(); 2505 if (!AtStart) 2506 --BI; 2507 2508 for (SmallVector<Instruction*, 4>::iterator I = InstrsToErase.begin(), 2509 E = InstrsToErase.end(); I != E; ++I) { 2510 DEBUG(dbgs() << "GVN removed: " << **I << '\n'); 2511 if (MD) MD->removeInstruction(*I); 2512 (*I)->eraseFromParent(); 2513 DEBUG(verifyRemoved(*I)); 2514 } 2515 InstrsToErase.clear(); 2516 2517 if (AtStart) 2518 BI = BB->begin(); 2519 else 2520 ++BI; 2521 } 2522 2523 return ChangedFunction; 2524} 2525 2526/// performPRE - Perform a purely local form of PRE that looks for diamond 2527/// control flow patterns and attempts to perform simple PRE at the join point. 2528bool GVN::performPRE(Function &F) { 2529 bool Changed = false; 2530 DenseMap<BasicBlock*, Value*> predMap; 2531 for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()), 2532 DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) { 2533 BasicBlock *CurrentBlock = *DI; 2534 2535 // Nothing to PRE in the entry block. 
2536 if (CurrentBlock == &F.getEntryBlock()) continue; 2537 2538 // Don't perform PRE on a landing pad. 2539 if (CurrentBlock->isLandingPad()) continue; 2540 2541 for (BasicBlock::iterator BI = CurrentBlock->begin(), 2542 BE = CurrentBlock->end(); BI != BE; ) { 2543 Instruction *CurInst = BI++; 2544 2545 if (isa<AllocaInst>(CurInst) || 2546 isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) || 2547 CurInst->getType()->isVoidTy() || 2548 CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() || 2549 isa<DbgInfoIntrinsic>(CurInst)) 2550 continue; 2551 2552 // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from 2553 // sinking the compare again, and it would force the code generator to 2554 // move the i1 from processor flags or predicate registers into a general 2555 // purpose register. 2556 if (isa<CmpInst>(CurInst)) 2557 continue; 2558 2559 // We don't currently value number ANY inline asm calls. 2560 if (CallInst *CallI = dyn_cast<CallInst>(CurInst)) 2561 if (CallI->isInlineAsm()) 2562 continue; 2563 2564 uint32_t ValNo = VN.lookup(CurInst); 2565 2566 // Look for the predecessors for PRE opportunities. We're 2567 // only trying to solve the basic diamond case, where 2568 // a value is computed in the successor and one predecessor, 2569 // but not the other. We also explicitly disallow cases 2570 // where the successor is its own predecessor, because they're 2571 // more complicated to get right. 2572 unsigned NumWith = 0; 2573 unsigned NumWithout = 0; 2574 BasicBlock *PREPred = 0; 2575 predMap.clear(); 2576 2577 for (pred_iterator PI = pred_begin(CurrentBlock), 2578 PE = pred_end(CurrentBlock); PI != PE; ++PI) { 2579 BasicBlock *P = *PI; 2580 // We're not interested in PRE where the block is its 2581 // own predecessor, or in blocks with predecessors 2582 // that are not reachable. 2583 if (P == CurrentBlock) { 2584 NumWithout = 2; 2585 break; 2586 } else if (!DT->dominates(&F.getEntryBlock(), P)) { 2587 NumWithout = 2; 2588 break; 2589 } 2590 2591 Value* predV = findLeader(P, ValNo); 2592 if (predV == 0) { 2593 PREPred = P; 2594 ++NumWithout; 2595 } else if (predV == CurInst) { 2596 NumWithout = 2; 2597 } else { 2598 predMap[P] = predV; 2599 ++NumWith; 2600 } 2601 } 2602 2603 // Don't do PRE when it might increase code size, i.e. when 2604 // we would need to insert instructions in more than one pred. 2605 if (NumWithout != 1 || NumWith == 0) 2606 continue; 2607 2608 // Don't do PRE across indirect branch. 2609 if (isa<IndirectBrInst>(PREPred->getTerminator())) 2610 continue; 2611 2612 // We can't do PRE safely on a critical edge, so instead we schedule 2613 // the edge to be split and perform the PRE the next time we iterate 2614 // on the function. 2615 unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock); 2616 if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) { 2617 toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum)); 2618 continue; 2619 } 2620 2621 // Instantiate the expression in the predecessor that lacked it. 2622 // Because we are going top-down through the block, all value numbers 2623 // will be available in the predecessor by the time we need them. Any 2624 // that weren't originally present will have been instantiated earlier 2625 // in this loop. 
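      // Sketch of the diamond case (hypothetical blocks %a and %b both
      // branching to CurrentBlock): if "%v = add i32 %x, %y" is available in
      // %a but not in %b, the add is cloned into %b and the join becomes
      //   %v.pre-phi = phi i32 [ %v, %a ], [ %v.pre, %b ]
      // after which the original instruction is replaced by the phi.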
2626 Instruction *PREInstr = CurInst->clone(); 2627 bool success = true; 2628 for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) { 2629 Value *Op = PREInstr->getOperand(i); 2630 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op)) 2631 continue; 2632 2633 if (Value *V = findLeader(PREPred, VN.lookup(Op))) { 2634 PREInstr->setOperand(i, V); 2635 } else { 2636 success = false; 2637 break; 2638 } 2639 } 2640 2641 // Fail out if we encounter an operand that is not available in 2642 // the PRE predecessor. This is typically because of loads which 2643 // are not value numbered precisely. 2644 if (!success) { 2645 delete PREInstr; 2646 DEBUG(verifyRemoved(PREInstr)); 2647 continue; 2648 } 2649 2650 PREInstr->insertBefore(PREPred->getTerminator()); 2651 PREInstr->setName(CurInst->getName() + ".pre"); 2652 PREInstr->setDebugLoc(CurInst->getDebugLoc()); 2653 predMap[PREPred] = PREInstr; 2654 VN.add(PREInstr, ValNo); 2655 ++NumGVNPRE; 2656 2657 // Update the availability map to include the new instruction. 2658 addToLeaderTable(ValNo, PREInstr, PREPred); 2659 2660 // Create a PHI to make the value available in this block. 2661 pred_iterator PB = pred_begin(CurrentBlock), PE = pred_end(CurrentBlock); 2662 PHINode* Phi = PHINode::Create(CurInst->getType(), std::distance(PB, PE), 2663 CurInst->getName() + ".pre-phi", 2664 CurrentBlock->begin()); 2665 for (pred_iterator PI = PB; PI != PE; ++PI) { 2666 BasicBlock *P = *PI; 2667 Phi->addIncoming(predMap[P], P); 2668 } 2669 2670 VN.add(Phi, ValNo); 2671 addToLeaderTable(ValNo, Phi, CurrentBlock); 2672 Phi->setDebugLoc(CurInst->getDebugLoc()); 2673 CurInst->replaceAllUsesWith(Phi); 2674 if (Phi->getType()->isPointerTy()) { 2675 // Because we have added a PHI-use of the pointer value, it has now 2676 // "escaped" from alias analysis' perspective. We need to inform 2677 // AA of this. 2678 for (unsigned ii = 0, ee = Phi->getNumIncomingValues(); ii != ee; 2679 ++ii) { 2680 unsigned jj = PHINode::getOperandNumForIncomingValue(ii); 2681 VN.getAliasAnalysis()->addEscapingUse(Phi->getOperandUse(jj)); 2682 } 2683 2684 if (MD) 2685 MD->invalidateCachedPointerInfo(Phi); 2686 } 2687 VN.erase(CurInst); 2688 removeFromLeaderTable(ValNo, CurInst, CurrentBlock); 2689 2690 DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n'); 2691 if (MD) MD->removeInstruction(CurInst); 2692 CurInst->eraseFromParent(); 2693 DEBUG(verifyRemoved(CurInst)); 2694 Changed = true; 2695 } 2696 } 2697 2698 if (splitCriticalEdges()) 2699 Changed = true; 2700 2701 return Changed; 2702} 2703 2704/// splitCriticalEdges - Split critical edges found during the previous 2705/// iteration that may enable further optimization. 2706bool GVN::splitCriticalEdges() { 2707 if (toSplit.empty()) 2708 return false; 2709 do { 2710 std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val(); 2711 SplitCriticalEdge(Edge.first, Edge.second, this); 2712 } while (!toSplit.empty()); 2713 if (MD) MD->invalidateCachedPredecessors(); 2714 return true; 2715} 2716 2717/// iterateOnFunction - Executes one iteration of GVN 2718bool GVN::iterateOnFunction(Function &F) { 2719 cleanupGlobalSets(); 2720 2721 // Top-down walk of the dominator tree 2722 bool Changed = false; 2723#if 0 2724 // Needed for value numbering with phi construction to work. 
2725 ReversePostOrderTraversal<Function*> RPOT(&F); 2726 for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(), 2727 RE = RPOT.end(); RI != RE; ++RI) 2728 Changed |= processBlock(*RI); 2729#else 2730 for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()), 2731 DE = df_end(DT->getRootNode()); DI != DE; ++DI) 2732 Changed |= processBlock(DI->getBlock()); 2733#endif 2734 2735 return Changed; 2736} 2737 2738void GVN::cleanupGlobalSets() { 2739 VN.clear(); 2740 LeaderTable.clear(); 2741 TableAllocator.Reset(); 2742} 2743 2744/// verifyRemoved - Verify that the specified instruction does not occur in our 2745/// internal data structures. 2746void GVN::verifyRemoved(const Instruction *Inst) const { 2747 VN.verifyRemoved(Inst); 2748 2749 // Walk through the value number scope to make sure the instruction isn't 2750 // ferreted away in it. 2751 for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator 2752 I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) { 2753 const LeaderTableEntry *Node = &I->second; 2754 assert(Node->Val != Inst && "Inst still in value numbering scope!"); 2755 2756 while (Node->Next) { 2757 Node = Node->Next; 2758 assert(Node->Val != Inst && "Inst still in value numbering scope!"); 2759 } 2760 } 2761} 2762