GVN.cpp revision fc6e29d4ab52b7d3efd83846ed495a9ca7e51e49
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> EnableFullLoadPRE("enable-full-load-pre", cl::init(false));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
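/// For example, given
///   %x = add i32 %a, %b
///   %y = add i32 %a, %b
/// both adds map to the same Expression (same opcode, type, and operand value
/// numbers), so they receive the same value number and the second add is
/// recognized as fully redundant.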
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      unsigned size();
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9));

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  CallSite CS(C);
  for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

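// A note on the create_expression family above: each overload canonicalizes
// an instruction into an Expression keyed on opcode, result type, and the
// value numbers of its operands.  For example, two structurally identical
// GEPs
//   %p1 = getelementptr i32* %base, i64 %i
//   %p2 = getelementptr i32* %base, i64 %i
// produce equal Expressions and therefore share a single value number.
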
//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
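      // Only accept the candidate if its block properly dominates the block
      // containing C; otherwise its result is not guaranteed to be available
      // at C.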
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or :
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value.  Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
      : FunctionPass(ID), NoLoads(noloads), MD(0) { }

  private:
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator and postdominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

INITIALIZE_PASS(GVN, "gvn", "Global Value Numbering", false, false);

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}

static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(*UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
/// map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}


/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
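  // For example, an i64 store can satisfy an i8, i16, or i32 load by shifting
  // and truncating, but an i8 store can never satisfy an i32 load.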
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}


/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.  One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614.  Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getOperand(0)->getType()->isStructTy() ||
      DepSI->getOperand(0)->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}


/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
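  // For example, on a little-endian target, a 1-byte load at Offset 2 out of
  // a 4-byte store is satisfied by shifting the stored integer right by 16
  // bits before truncating; on a big-endian target the shift amount is
  // measured from the opposite end (StoreSize-LoadSize-Offset bytes).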
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // See if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}

namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

}

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same
      // as the pointer operand of the load if PHI translation occurs.  Make
      // sure to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                       DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
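      // For example, a load of i32 whose bits are fully covered by
      // "memset(p, 0, 16)" can be satisfied with the constant 0 rather than
      // reloading from memory.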
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(LI);
    toErase.push_back(LI);
    ++NumGVNLoad;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // If we have a repl set with LI itself in it, this means we have a loop where
  // at least one of the values is LI.  Since this means that we won't be able
  // to eliminate LI even if we insert uses in the other predecessors, we will
  // end up increasing code size.  Reject this by scanning for LI.
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    if (ValuesPerBlock[i].isSimpleValue() &&
        ValuesPerBlock[i].getSimpleValue() == LI) {
      // Skip cases where LI is the only definition, even for EnableFullLoadPRE.
      if (!EnableFullLoadPRE || e == 1)
        return false;
    }
  }

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions.  We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
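  // Predecessors where the value is unavailable end up in PredLoads, mapping
  // each one to the (phi-translated) address to reload from if PRE succeeds.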
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
    }
  }
  if (!NeedToSplit.empty()) {
    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
    return false;
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");
  if (!EnableFullLoadPRE) {
    // If this load is unavailable in multiple predecessors, reject it.
    // FIXME: If we could restructure the CFG, we could make a common pred with
    // all the preds that don't have an available LI and insert a new load into
    // that one block.
    if (NumUnavailablePreds != 1)
      return false;
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary.  The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getOperand(0), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getOperand(0) << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here.  We have to watch out for:
    //  @1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load @1
    // It is valid to have the getelementptr before the test, even if p can be
    // 0, as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                  LI->getAlignment(),
                                  UnavailablePred->getTerminator());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  VN.erase(LI);
  toErase.push_back(LI);
  ++NumPRELoad;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We can handle this by recognizing that the clobbering instructions are
    // obviously a common base + constant offset, and that the previous store
    // (or memset) completely covers this load.  This sort of thing can happen
    // in bitfield access code.
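    // AnalyzeLoadFromClobberingStore returns the byte offset of the load
    // within the clobbering store (or -1 if the store does not completely
    // cover the load); GetStoreValueForLoad then shifts, truncates, and/or
    // bitcasts the stored value to materialize the bits the load would read.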
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(),
                                            L, *TD);
      }
    }

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
            << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }

    DEBUG(
      // fast print dep, using operator<< on instruction would be too slow
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else
        return false;
    }
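    // At this point AvailableVal has L's type (after coercion if one was
    // needed), so every use of L can be rewritten to use it directly.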
    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (AvailableVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(AvailableVal);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading for a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load occurs right after a lifetime begin intrinsic, then the
  // loaded value is undefined.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }
  }

  return false;
}

Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
    if (I != Locals->table.end())
      return I->second;
    Locals = Locals->parent;
  }

  return 0;
}


/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);
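    // Propagate the branch condition into successors that can only be reached
    // when it has a known value: e.g. after "br i1 %cmp, label %t, label %f",
    // any use of %cmp's value number inside %t folds to true and inside %f to
    // false, provided each successor has no other predecessors.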
    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());

    return false;

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }

  // Collapse PHI nodes.
  if (PHINode *p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && constVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && repl->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function &F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}


bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
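    // Note that BI itself may be in toErase, so rewind the iterator (or reset
    // it to the block start) before erasing and re-advance it afterwards.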
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVector<Instruction*, 8>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      // We don't currently value number ANY inline asm calls.
      if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
        if (CallI->isInlineAsm())
          continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look for the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (P == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(P)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
          localAvail[P]->table.find(ValNo);
        if (predV == localAvail[P]->table.end()) {
          PREPred = P;
          ++NumWithout;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[P] = predV->second;
          ++NumWith;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
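      // (An edge is critical when its source has multiple successors and its
      // destination has multiple predecessors; inserting into PREPred across
      // such an edge would execute the expression on paths that never reach
      // CurrentBlock.)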
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        delete PREInstr;
        DEBUG(verifyRemoved(PREInstr));
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      ++NumGVNPRE;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));
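      // Every predecessor now supplies the value: the ones that already had it
      // are recorded in predMap, and PREPred was just added above, so the phi
      // below can take one incoming value per predecessor.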
      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        Phi->addIncoming(predMap[P], P);
      }

      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && Phi->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}

/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
        new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
           II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;
    }
  }
}