GVN.cpp revision 74175c2ca1d4c9180f5e46d362814b9f0a114536
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> EnableFullLoadPRE("enable-full-load-pre", cl::init(false));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers. It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
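///
/// For example, two syntactically identical instructions such as
///   %a = add i32 %x, %y
///   %b = add i32 %x, %y
/// produce the same Expression key and therefore receive the same value
/// number, which is what later lets GVN treat %b as redundant with %a.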
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(Constant* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      unsigned size();
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    // Note that this assignment discards the opcode contribution computed
    // above: expressions that differ only in opcode hash identically.  That
    // costs only extra collisions, since isEqual still compares opcodes.
    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9));

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}
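// Note that operands are recorded as value numbers rather than Values, so two
// GEPs such as
//   %p1 = getelementptr i32* %base, i32 %i
//   %p2 = getelementptr i32* %base, i32 %i
// build identical Expression keys (same type, same opcode, same value-numbered
// operands) and thus share a value number.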
//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumOperands() != C->getNumOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 1; i < C->getNumOperands(); ++i) {
        uint32_t c_vn = lookup_or_add(C->getOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())) {
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumOperands() != C->getNumOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 1; i < C->getNumOperands(); ++i) {
      uint32_t c_vn = lookup_or_add(C->getOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
  case Instruction::Call:
    return lookup_or_add_call(cast<CallInst>(I));
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    exp = create_expression(cast<BinaryOperator>(I));
    break;
  case Instruction::ICmp:
  case Instruction::FCmp:
    exp = create_expression(cast<CmpInst>(I));
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::BitCast:
    exp = create_expression(cast<CastInst>(I));
    break;
  case Instruction::Select:
    exp = create_expression(cast<SelectInst>(I));
    break;
  case Instruction::ExtractElement:
    exp = create_expression(cast<ExtractElementInst>(I));
    break;
  case Instruction::InsertElement:
    exp = create_expression(cast<InsertElementInst>(I));
    break;
  case Instruction::ShuffleVector:
    exp = create_expression(cast<ShuffleVectorInst>(I));
    break;
  case Instruction::ExtractValue:
    exp = create_expression(cast<ExtractValueInst>(I));
    break;
  case Instruction::InsertValue:
    exp = create_expression(cast<InsertValueInst>(I));
    break;
  case Instruction::GetElementPtr:
    exp = create_expression(cast<GetElementPtrInst>(I));
    break;
  default:
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}
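// For example, two calls to a readonly function with identically numbered
// arguments, where MemDep reports that the second call depends directly on
// the first:
//   %r1 = call i32 @f(i32 %x)    ; @f only reads memory
//   %r2 = call i32 @f(i32 %x)
// receive the same value number via lookup_or_add_call above, which allows
// %r2 to be eliminated later as fully redundant.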
/// lookup - Returns the value number of the specified value.  Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  /// ValueNumberScope - A per-block table mapping value numbers to the Value
  /// available for that number in the block; parent points at the scope of
  /// the block's dominator, so lookups can walk up the dominator tree.
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool nopre = false, bool noloads = false)
      : FunctionPass(&ID), NoPRE(nopre), NoLoads(noloads), MD(0) { }

  private:
    bool NoPRE;
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}
// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoPRE, bool NoLoads) {
  return new GVN(NoPRE, NoLoads);
}

static RegisterPass<GVN> X("gvn",
                           "Global Value Numbering");

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}

static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}
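// For example, a PHI such as
//   %p = phi i32 [ %v, %bb1 ], [ %v, %bb2 ]
// collapses to %v, provided %v's definition dominates the PHI and the
// replacement passes the isSafeReplacement check above.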
/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks.
/// Despite being declared as a char map, this is a four-state map with the
/// following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this
// and used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them
  // as 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
      TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}
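// For example, an i64 store can feed an i32 load (the stored bits cover all
// of the loaded bits), but an i32 store can never satisfy an i64 load.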
/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory,
/// and then a load from a must-aliased pointer of a different type, try to
/// coerce the stored value.  LoadedTy is the type of the load we want to
/// replace and InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}


/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits
/// used by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.
  if (LoadOffset == StoreOffset) {
#if 0
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614.  Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset) {
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  } else {
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
  }
  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load and then merge the bits in), but this seems unlikely
  // to be valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
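// Worked example: if the store writes 8 bytes at offset 0 from some base and
// the load reads 4 bytes at offset 4 from the same base, the load is fully
// contained in the store and this returns 4: the load reads bytes [4,8) of
// the stored value.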
/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getOperand(0)->getType()->isStructTy() ||
      DepSI->getOperand(0)->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}
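// For example, when the clobbering write is a memcpy whose source is a
// constant global @G, a load of an i32 that falls 4 bytes into the copied
// range can be folded to a constant built from bytes [4,8) of @G's
// initializer.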
/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8;
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.
  // Convert to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independent of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}
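// Worked example: for a memset of byte 0xAB feeding an i32 load, Val starts as
// the i8 0xAB zero-extended to i32, the doubling loop above builds 0xABAB and
// then 0xABABABAB, and the result is finally coerced to the load's type.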
struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple value, possibly accessed at an offset.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};
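// For example, if one predecessor's available value is a stored i64 and the
// load is an i32, MaterializeAdjustedValue emits the shift/truncate sequence
// (via GetStoreValueForLoad) at the end of that predecessor, so that every
// block contributes a value of the load's own type.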
/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI);

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}

static bool isLifetimeStart(Instruction *Inst) {
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in ValuesPerBlock, and keep track of
  // whether we see dependencies that produce an unknown value for the load
  // (such as a call that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same
      // as the pointer operand of the load if PHI translation occurs.  Make
      // sure to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                       DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the
      // load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the previously loaded value is larger or equal to this one, we
        // can reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(LI);
    toErase.push_back(LI);
    NumGVNLoad++;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).
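  //
  // For example, in a triangle CFG where the loaded value is available in only
  // one predecessor:
  //   BB1: store i32 %v, i32* %P      ; value available here
  //   BB2: ...                        ; value unavailable here
  //   BB3: %x = load i32* %P          ; partially redundant
  // load PRE inserts a copy of the load at the end of BB2 and then PHIs the
  // two values in BB3, so the original load can be deleted.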
  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // If ValuesPerBlock contains LI itself, we have a loop where at least one of
  // the values is LI.  Since this means that we won't be able to eliminate LI
  // even if we insert uses in the other predecessors, we will end up
  // increasing code size.  Reject this by scanning for LI.
  if (!EnableFullLoadPRE) {
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
      if (ValuesPerBlock[i].isSimpleValue() &&
          ValuesPerBlock[i].getSimpleValue() == LI)
        return false;
  }

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // A "hot" instruction is one in some loop (because it dominates its
        // dependent instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions.  We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      toSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
      return false;
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");
  if (!EnableFullLoadPRE) {
    // If this load is unavailable in multiple predecessors, reject it.
    // FIXME: If we could restructure the CFG, we could make a common pred with
    // all the preds that don't have an available LI and insert a new load into
    // that one block.
    if (NumUnavailablePreds != 1)
      return false;
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary.
    // The returned pointer (if non-null) is guaranteed to dominate
    // UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getOperand(0), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred);
      LoadPtr = Address.getAddr();

      // Make sure the value is live in the predecessor.
      if (Instruction *Inst = dyn_cast_or_null<Instruction>(LoadPtr))
        if (!DT->dominates(Inst->getParent(), UnavailablePred))
          LoadPtr = 0;
    }

    // If we couldn't find or insert a computation of this phi translated
    // value, we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getOperand(0) << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here.  We have to watch out for:
    //  @1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load @1
    // It is valid to have the getelementptr before the test, even if p can be
    // 0, as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case.  Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                  LI->getAlignment(),
                                  UnavailablePred->getTerminator());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  VN.erase(LI);
  toErase.push_back(LI);
  NumPRELoad++;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We could do that by recognizing if the clobber instructions are obviously
    // a common base + constant offset, and if the previous store (or memset)
    // completely covers this load.  This sort of thing can happen in bitfield
    // access code.
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(),
                                            L, *TD);
      }
    }
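
    // A sketch of the mem-intrinsic case (illustrative, not from the
    // original comments):
    //   memset(%P, 0, 16)
    //   %A = gep i8* %P, i32 4
    //   %v = load i8* %A        ; forwardable: %v must be 0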

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
            << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      VN.erase(L);
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }

    DEBUG(
      // fast print dep, using operator<< on instruction would be too slow
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      }
      else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    VN.erase(L);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      }
      else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (AvailableVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(AvailableVal);
    VN.erase(L);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading from a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    VN.erase(L);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load occurs right after a lifetime begin, then the loaded value
  // is undefined.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      VN.erase(L);
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }
  }

  return false;
}

Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
    if (I != Locals->table.end())
      return I->second;
    Locals = Locals->parent;
  }

  return 0;
}


/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());

    return false;
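
    // An illustrative note (a reading of the code above, not an original
    // comment): after "br i1 %c, label %T, label %F", %c's value number maps
    // to true inside %T and to false inside %F (when each has a single
    // predecessor), so a later use of %c in %T can be replaced by the
    // constant via the dominator-scope lookup in lookupNumber.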

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }

  // Collapse PHI nodes.
  if (PHINode *p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && constVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && repl->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) NumGVNBlocks++;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}


bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVector<Instruction*, 8>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}
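
// An illustrative sketch (not from the original source) of the diamond that
// performPRE below targets: the expression is computed in one predecessor
// and at the join, so a copy is inserted into the other predecessor and the
// join's computation collapses to a phi:
//
//   if.then:                          if.then:
//     %a = add i32 %x, %y               %a = add i32 %x, %y
//     br label %join                    br label %join
//   if.else:                   =>    if.else:
//     br label %join                    %b.pre = add i32 %x, %y
//   join:                               br label %join
//     %b = add i32 %x, %y             join:
//                                       %b = phi i32 [ %a, %if.then ],
//                                                    [ %b.pre, %if.else ]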

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look for the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (*PI == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(*PI)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
          localAvail[*PI]->table.find(ValNo);
        if (predV == localAvail[*PI]->table.end()) {
          PREPred = *PI;
          NumWithout++;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[*PI] = predV->second;
          NumWith++;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }
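
      // For instance (an illustrative case, not from the original comments):
      // cloning "%b = add i32 %x, %y" into the predecessor requires that both
      // %x and %y have values with the same value numbers available there; a
      // value computed only along the other incoming edge has none, so we
      // give up on this instruction.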

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      NumGVNPRE++;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));

      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI)
        Phi->addIncoming(predMap[*PI], *PI);

      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && Phi->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) // MD is null when load elimination is disabled (NoLoads).
    MD->invalidateCachedPredecessors();
  return true;
}

/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
        new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}
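
// A note on the structures verified below (a reading of the code, not an
// original comment): localAvail mirrors the dominator tree, with each
// block's ValueNumberScope chaining to its immediate dominator's scope, so
// lookupNumber sees every value made available by a dominating block.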

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
           II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;
    }
  }
}