GVN.cpp revision 722cc1f41413530e15a46eb940ce68330647ff27
1//===- GVN.cpp - Eliminate redundant values and loads ---------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This pass performs global value numbering to eliminate fully redundant 11// instructions. It also performs simple dead load elimination. 12// 13// Note that this pass does the value numbering itself; it does not use the 14// ValueNumbering analysis passes. 15// 16//===----------------------------------------------------------------------===// 17 18#define DEBUG_TYPE "gvn" 19#include "llvm/Transforms/Scalar.h" 20#include "llvm/BasicBlock.h" 21#include "llvm/Constants.h" 22#include "llvm/DerivedTypes.h" 23#include "llvm/GlobalVariable.h" 24#include "llvm/Function.h" 25#include "llvm/IntrinsicInst.h" 26#include "llvm/LLVMContext.h" 27#include "llvm/Operator.h" 28#include "llvm/Value.h" 29#include "llvm/ADT/DenseMap.h" 30#include "llvm/ADT/DepthFirstIterator.h" 31#include "llvm/ADT/PostOrderIterator.h" 32#include "llvm/ADT/SmallPtrSet.h" 33#include "llvm/ADT/SmallVector.h" 34#include "llvm/ADT/Statistic.h" 35#include "llvm/Analysis/AliasAnalysis.h" 36#include "llvm/Analysis/ConstantFolding.h" 37#include "llvm/Analysis/Dominators.h" 38#include "llvm/Analysis/Loads.h" 39#include "llvm/Analysis/MemoryBuiltins.h" 40#include "llvm/Analysis/MemoryDependenceAnalysis.h" 41#include "llvm/Analysis/PHITransAddr.h" 42#include "llvm/Support/CFG.h" 43#include "llvm/Support/CommandLine.h" 44#include "llvm/Support/Debug.h" 45#include "llvm/Support/ErrorHandling.h" 46#include "llvm/Support/GetElementPtrTypeIterator.h" 47#include "llvm/Support/IRBuilder.h" 48#include "llvm/Support/raw_ostream.h" 49#include "llvm/Target/TargetData.h" 50#include "llvm/Transforms/Utils/BasicBlockUtils.h" 51#include "llvm/Transforms/Utils/Local.h" 52#include "llvm/Transforms/Utils/SSAUpdater.h" 53using namespace llvm; 54 55STATISTIC(NumGVNInstr, "Number of instructions deleted"); 56STATISTIC(NumGVNLoad, "Number of loads deleted"); 57STATISTIC(NumGVNPRE, "Number of instructions PRE'd"); 58STATISTIC(NumGVNBlocks, "Number of blocks merged"); 59STATISTIC(NumPRELoad, "Number of loads PRE'd"); 60 61static cl::opt<bool> EnablePRE("enable-pre", 62 cl::init(true), cl::Hidden); 63static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true)); 64static cl::opt<bool> EnableFullLoadPRE("enable-full-load-pre", cl::init(false)); 65 66//===----------------------------------------------------------------------===// 67// ValueTable Class 68//===----------------------------------------------------------------------===// 69 70/// This class holds the mapping between values and value numbers. It is used 71/// as an efficient mechanism to determine the expression-wise equivalence of 72/// two values. 73namespace { 74 struct Expression { 75 enum ExpressionOpcode { 76 ADD = Instruction::Add, 77 FADD = Instruction::FAdd, 78 SUB = Instruction::Sub, 79 FSUB = Instruction::FSub, 80 MUL = Instruction::Mul, 81 FMUL = Instruction::FMul, 82 UDIV = Instruction::UDiv, 83 SDIV = Instruction::SDiv, 84 FDIV = Instruction::FDiv, 85 UREM = Instruction::URem, 86 SREM = Instruction::SRem, 87 FREM = Instruction::FRem, 88 SHL = Instruction::Shl, 89 LSHR = Instruction::LShr, 90 ASHR = Instruction::AShr, 91 AND = Instruction::And, 92 OR = Instruction::Or, 93 XOR = Instruction::Xor, 94 TRUNC = Instruction::Trunc, 95 ZEXT = Instruction::ZExt, 96 SEXT = Instruction::SExt, 97 FPTOUI = Instruction::FPToUI, 98 FPTOSI = Instruction::FPToSI, 99 UITOFP = Instruction::UIToFP, 100 SITOFP = Instruction::SIToFP, 101 FPTRUNC = Instruction::FPTrunc, 102 FPEXT = Instruction::FPExt, 103 PTRTOINT = Instruction::PtrToInt, 104 INTTOPTR = Instruction::IntToPtr, 105 BITCAST = Instruction::BitCast, 106 ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE, 107 ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ, 108 FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE, 109 FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE, 110 FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT, 111 SHUFFLE, SELECT, GEP, CALL, CONSTANT, 112 INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE }; 113 114 ExpressionOpcode opcode; 115 const Type* type; 116 SmallVector<uint32_t, 4> varargs; 117 Value *function; 118 119 Expression() { } 120 Expression(ExpressionOpcode o) : opcode(o) { } 121 122 bool operator==(const Expression &other) const { 123 if (opcode != other.opcode) 124 return false; 125 else if (opcode == EMPTY || opcode == TOMBSTONE) 126 return true; 127 else if (type != other.type) 128 return false; 129 else if (function != other.function) 130 return false; 131 else { 132 if (varargs.size() != other.varargs.size()) 133 return false; 134 135 for (size_t i = 0; i < varargs.size(); ++i) 136 if (varargs[i] != other.varargs[i]) 137 return false; 138 139 return true; 140 } 141 } 142 143 /*bool operator!=(const Expression &other) const { 144 return !(*this == other); 145 }*/ 146 }; 147 148 class ValueTable { 149 private: 150 DenseMap<Value*, uint32_t> valueNumbering; 151 DenseMap<Expression, uint32_t> expressionNumbering; 152 AliasAnalysis* AA; 153 MemoryDependenceAnalysis* MD; 154 DominatorTree* DT; 155 156 uint32_t nextValueNumber; 157 158 Expression::ExpressionOpcode getOpcode(CmpInst* C); 159 Expression create_expression(BinaryOperator* BO); 160 Expression create_expression(CmpInst* C); 161 Expression create_expression(ShuffleVectorInst* V); 162 Expression create_expression(ExtractElementInst* C); 163 Expression create_expression(InsertElementInst* V); 164 Expression create_expression(SelectInst* V); 165 Expression create_expression(CastInst* C); 166 Expression create_expression(GetElementPtrInst* G); 167 Expression create_expression(CallInst* C); 168 Expression create_expression(ExtractValueInst* C); 169 Expression create_expression(InsertValueInst* C); 170 171 uint32_t lookup_or_add_call(CallInst* C); 172 public: 173 ValueTable() : nextValueNumber(1) { } 174 uint32_t lookup_or_add(Value *V); 175 uint32_t lookup(Value *V) const; 176 void add(Value *V, uint32_t num); 177 void clear(); 178 void erase(Value *v); 179 void setAliasAnalysis(AliasAnalysis* A) { AA = A; } 180 AliasAnalysis *getAliasAnalysis() const { return AA; } 181 void setMemDep(MemoryDependenceAnalysis* M) { MD = M; } 182 void setDomTree(DominatorTree* D) { DT = D; } 183 uint32_t getNextUnusedValueNumber() { return nextValueNumber; } 184 void verifyRemoved(const Value *) const; 185 }; 186} 187 188namespace llvm { 189template <> struct DenseMapInfo<Expression> { 190 static inline Expression getEmptyKey() { 191 return Expression(Expression::EMPTY); 192 } 193 194 static inline Expression getTombstoneKey() { 195 return Expression(Expression::TOMBSTONE); 196 } 197 198 static unsigned getHashValue(const Expression e) { 199 unsigned hash = e.opcode; 200 201 hash = ((unsigned)((uintptr_t)e.type >> 4) ^ 202 (unsigned)((uintptr_t)e.type >> 9)); 203 204 for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(), 205 E = e.varargs.end(); I != E; ++I) 206 hash = *I + hash * 37; 207 208 hash = ((unsigned)((uintptr_t)e.function >> 4) ^ 209 (unsigned)((uintptr_t)e.function >> 9)) + 210 hash * 37; 211 212 return hash; 213 } 214 static bool isEqual(const Expression &LHS, const Expression &RHS) { 215 return LHS == RHS; 216 } 217}; 218 219template <> 220struct isPodLike<Expression> { static const bool value = true; }; 221 222} 223 224//===----------------------------------------------------------------------===// 225// ValueTable Internal Functions 226//===----------------------------------------------------------------------===// 227 228Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) { 229 if (isa<ICmpInst>(C)) { 230 switch (C->getPredicate()) { 231 default: // THIS SHOULD NEVER HAPPEN 232 llvm_unreachable("Comparison with unknown predicate?"); 233 case ICmpInst::ICMP_EQ: return Expression::ICMPEQ; 234 case ICmpInst::ICMP_NE: return Expression::ICMPNE; 235 case ICmpInst::ICMP_UGT: return Expression::ICMPUGT; 236 case ICmpInst::ICMP_UGE: return Expression::ICMPUGE; 237 case ICmpInst::ICMP_ULT: return Expression::ICMPULT; 238 case ICmpInst::ICMP_ULE: return Expression::ICMPULE; 239 case ICmpInst::ICMP_SGT: return Expression::ICMPSGT; 240 case ICmpInst::ICMP_SGE: return Expression::ICMPSGE; 241 case ICmpInst::ICMP_SLT: return Expression::ICMPSLT; 242 case ICmpInst::ICMP_SLE: return Expression::ICMPSLE; 243 } 244 } else { 245 switch (C->getPredicate()) { 246 default: // THIS SHOULD NEVER HAPPEN 247 llvm_unreachable("Comparison with unknown predicate?"); 248 case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ; 249 case FCmpInst::FCMP_OGT: return Expression::FCMPOGT; 250 case FCmpInst::FCMP_OGE: return Expression::FCMPOGE; 251 case FCmpInst::FCMP_OLT: return Expression::FCMPOLT; 252 case FCmpInst::FCMP_OLE: return Expression::FCMPOLE; 253 case FCmpInst::FCMP_ONE: return Expression::FCMPONE; 254 case FCmpInst::FCMP_ORD: return Expression::FCMPORD; 255 case FCmpInst::FCMP_UNO: return Expression::FCMPUNO; 256 case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ; 257 case FCmpInst::FCMP_UGT: return Expression::FCMPUGT; 258 case FCmpInst::FCMP_UGE: return Expression::FCMPUGE; 259 case FCmpInst::FCMP_ULT: return Expression::FCMPULT; 260 case FCmpInst::FCMP_ULE: return Expression::FCMPULE; 261 case FCmpInst::FCMP_UNE: return Expression::FCMPUNE; 262 } 263 } 264} 265 266Expression ValueTable::create_expression(CallInst* C) { 267 Expression e; 268 269 e.type = C->getType(); 270 e.function = C->getCalledFunction(); 271 e.opcode = Expression::CALL; 272 273 CallSite CS(C); 274 for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end(); 275 I != E; ++I) 276 e.varargs.push_back(lookup_or_add(*I)); 277 278 return e; 279} 280 281Expression ValueTable::create_expression(BinaryOperator* BO) { 282 Expression e; 283 e.varargs.push_back(lookup_or_add(BO->getOperand(0))); 284 e.varargs.push_back(lookup_or_add(BO->getOperand(1))); 285 e.function = 0; 286 e.type = BO->getType(); 287 e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode()); 288 289 return e; 290} 291 292Expression ValueTable::create_expression(CmpInst* C) { 293 Expression e; 294 295 e.varargs.push_back(lookup_or_add(C->getOperand(0))); 296 e.varargs.push_back(lookup_or_add(C->getOperand(1))); 297 e.function = 0; 298 e.type = C->getType(); 299 e.opcode = getOpcode(C); 300 301 return e; 302} 303 304Expression ValueTable::create_expression(CastInst* C) { 305 Expression e; 306 307 e.varargs.push_back(lookup_or_add(C->getOperand(0))); 308 e.function = 0; 309 e.type = C->getType(); 310 e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode()); 311 312 return e; 313} 314 315Expression ValueTable::create_expression(ShuffleVectorInst* S) { 316 Expression e; 317 318 e.varargs.push_back(lookup_or_add(S->getOperand(0))); 319 e.varargs.push_back(lookup_or_add(S->getOperand(1))); 320 e.varargs.push_back(lookup_or_add(S->getOperand(2))); 321 e.function = 0; 322 e.type = S->getType(); 323 e.opcode = Expression::SHUFFLE; 324 325 return e; 326} 327 328Expression ValueTable::create_expression(ExtractElementInst* E) { 329 Expression e; 330 331 e.varargs.push_back(lookup_or_add(E->getOperand(0))); 332 e.varargs.push_back(lookup_or_add(E->getOperand(1))); 333 e.function = 0; 334 e.type = E->getType(); 335 e.opcode = Expression::EXTRACT; 336 337 return e; 338} 339 340Expression ValueTable::create_expression(InsertElementInst* I) { 341 Expression e; 342 343 e.varargs.push_back(lookup_or_add(I->getOperand(0))); 344 e.varargs.push_back(lookup_or_add(I->getOperand(1))); 345 e.varargs.push_back(lookup_or_add(I->getOperand(2))); 346 e.function = 0; 347 e.type = I->getType(); 348 e.opcode = Expression::INSERT; 349 350 return e; 351} 352 353Expression ValueTable::create_expression(SelectInst* I) { 354 Expression e; 355 356 e.varargs.push_back(lookup_or_add(I->getCondition())); 357 e.varargs.push_back(lookup_or_add(I->getTrueValue())); 358 e.varargs.push_back(lookup_or_add(I->getFalseValue())); 359 e.function = 0; 360 e.type = I->getType(); 361 e.opcode = Expression::SELECT; 362 363 return e; 364} 365 366Expression ValueTable::create_expression(GetElementPtrInst* G) { 367 Expression e; 368 369 e.varargs.push_back(lookup_or_add(G->getPointerOperand())); 370 e.function = 0; 371 e.type = G->getType(); 372 e.opcode = Expression::GEP; 373 374 for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end(); 375 I != E; ++I) 376 e.varargs.push_back(lookup_or_add(*I)); 377 378 return e; 379} 380 381Expression ValueTable::create_expression(ExtractValueInst* E) { 382 Expression e; 383 384 e.varargs.push_back(lookup_or_add(E->getAggregateOperand())); 385 for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end(); 386 II != IE; ++II) 387 e.varargs.push_back(*II); 388 e.function = 0; 389 e.type = E->getType(); 390 e.opcode = Expression::EXTRACTVALUE; 391 392 return e; 393} 394 395Expression ValueTable::create_expression(InsertValueInst* E) { 396 Expression e; 397 398 e.varargs.push_back(lookup_or_add(E->getAggregateOperand())); 399 e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand())); 400 for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end(); 401 II != IE; ++II) 402 e.varargs.push_back(*II); 403 e.function = 0; 404 e.type = E->getType(); 405 e.opcode = Expression::INSERTVALUE; 406 407 return e; 408} 409 410//===----------------------------------------------------------------------===// 411// ValueTable External Functions 412//===----------------------------------------------------------------------===// 413 414/// add - Insert a value into the table with a specified value number. 415void ValueTable::add(Value *V, uint32_t num) { 416 valueNumbering.insert(std::make_pair(V, num)); 417} 418 419uint32_t ValueTable::lookup_or_add_call(CallInst* C) { 420 if (AA->doesNotAccessMemory(C)) { 421 Expression exp = create_expression(C); 422 uint32_t& e = expressionNumbering[exp]; 423 if (!e) e = nextValueNumber++; 424 valueNumbering[C] = e; 425 return e; 426 } else if (AA->onlyReadsMemory(C)) { 427 Expression exp = create_expression(C); 428 uint32_t& e = expressionNumbering[exp]; 429 if (!e) { 430 e = nextValueNumber++; 431 valueNumbering[C] = e; 432 return e; 433 } 434 if (!MD) { 435 e = nextValueNumber++; 436 valueNumbering[C] = e; 437 return e; 438 } 439 440 MemDepResult local_dep = MD->getDependency(C); 441 442 if (!local_dep.isDef() && !local_dep.isNonLocal()) { 443 valueNumbering[C] = nextValueNumber; 444 return nextValueNumber++; 445 } 446 447 if (local_dep.isDef()) { 448 CallInst* local_cdep = cast<CallInst>(local_dep.getInst()); 449 450 if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) { 451 valueNumbering[C] = nextValueNumber; 452 return nextValueNumber++; 453 } 454 455 for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) { 456 uint32_t c_vn = lookup_or_add(C->getArgOperand(i)); 457 uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i)); 458 if (c_vn != cd_vn) { 459 valueNumbering[C] = nextValueNumber; 460 return nextValueNumber++; 461 } 462 } 463 464 uint32_t v = lookup_or_add(local_cdep); 465 valueNumbering[C] = v; 466 return v; 467 } 468 469 // Non-local case. 470 const MemoryDependenceAnalysis::NonLocalDepInfo &deps = 471 MD->getNonLocalCallDependency(CallSite(C)); 472 // FIXME: call/call dependencies for readonly calls should return def, not 473 // clobber! Move the checking logic to MemDep! 474 CallInst* cdep = 0; 475 476 // Check to see if we have a single dominating call instruction that is 477 // identical to C. 478 for (unsigned i = 0, e = deps.size(); i != e; ++i) { 479 const NonLocalDepEntry *I = &deps[i]; 480 // Ignore non-local dependencies. 481 if (I->getResult().isNonLocal()) 482 continue; 483 484 // We don't handle non-depedencies. If we already have a call, reject 485 // instruction dependencies. 486 if (I->getResult().isClobber() || cdep != 0) { 487 cdep = 0; 488 break; 489 } 490 491 CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst()); 492 // FIXME: All duplicated with non-local case. 493 if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){ 494 cdep = NonLocalDepCall; 495 continue; 496 } 497 498 cdep = 0; 499 break; 500 } 501 502 if (!cdep) { 503 valueNumbering[C] = nextValueNumber; 504 return nextValueNumber++; 505 } 506 507 if (cdep->getNumArgOperands() != C->getNumArgOperands()) { 508 valueNumbering[C] = nextValueNumber; 509 return nextValueNumber++; 510 } 511 for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) { 512 uint32_t c_vn = lookup_or_add(C->getArgOperand(i)); 513 uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i)); 514 if (c_vn != cd_vn) { 515 valueNumbering[C] = nextValueNumber; 516 return nextValueNumber++; 517 } 518 } 519 520 uint32_t v = lookup_or_add(cdep); 521 valueNumbering[C] = v; 522 return v; 523 524 } else { 525 valueNumbering[C] = nextValueNumber; 526 return nextValueNumber++; 527 } 528} 529 530/// lookup_or_add - Returns the value number for the specified value, assigning 531/// it a new number if it did not have one before. 532uint32_t ValueTable::lookup_or_add(Value *V) { 533 DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V); 534 if (VI != valueNumbering.end()) 535 return VI->second; 536 537 if (!isa<Instruction>(V)) { 538 valueNumbering[V] = nextValueNumber; 539 return nextValueNumber++; 540 } 541 542 Instruction* I = cast<Instruction>(V); 543 Expression exp; 544 switch (I->getOpcode()) { 545 case Instruction::Call: 546 return lookup_or_add_call(cast<CallInst>(I)); 547 case Instruction::Add: 548 case Instruction::FAdd: 549 case Instruction::Sub: 550 case Instruction::FSub: 551 case Instruction::Mul: 552 case Instruction::FMul: 553 case Instruction::UDiv: 554 case Instruction::SDiv: 555 case Instruction::FDiv: 556 case Instruction::URem: 557 case Instruction::SRem: 558 case Instruction::FRem: 559 case Instruction::Shl: 560 case Instruction::LShr: 561 case Instruction::AShr: 562 case Instruction::And: 563 case Instruction::Or : 564 case Instruction::Xor: 565 exp = create_expression(cast<BinaryOperator>(I)); 566 break; 567 case Instruction::ICmp: 568 case Instruction::FCmp: 569 exp = create_expression(cast<CmpInst>(I)); 570 break; 571 case Instruction::Trunc: 572 case Instruction::ZExt: 573 case Instruction::SExt: 574 case Instruction::FPToUI: 575 case Instruction::FPToSI: 576 case Instruction::UIToFP: 577 case Instruction::SIToFP: 578 case Instruction::FPTrunc: 579 case Instruction::FPExt: 580 case Instruction::PtrToInt: 581 case Instruction::IntToPtr: 582 case Instruction::BitCast: 583 exp = create_expression(cast<CastInst>(I)); 584 break; 585 case Instruction::Select: 586 exp = create_expression(cast<SelectInst>(I)); 587 break; 588 case Instruction::ExtractElement: 589 exp = create_expression(cast<ExtractElementInst>(I)); 590 break; 591 case Instruction::InsertElement: 592 exp = create_expression(cast<InsertElementInst>(I)); 593 break; 594 case Instruction::ShuffleVector: 595 exp = create_expression(cast<ShuffleVectorInst>(I)); 596 break; 597 case Instruction::ExtractValue: 598 exp = create_expression(cast<ExtractValueInst>(I)); 599 break; 600 case Instruction::InsertValue: 601 exp = create_expression(cast<InsertValueInst>(I)); 602 break; 603 case Instruction::GetElementPtr: 604 exp = create_expression(cast<GetElementPtrInst>(I)); 605 break; 606 default: 607 valueNumbering[V] = nextValueNumber; 608 return nextValueNumber++; 609 } 610 611 uint32_t& e = expressionNumbering[exp]; 612 if (!e) e = nextValueNumber++; 613 valueNumbering[V] = e; 614 return e; 615} 616 617/// lookup - Returns the value number of the specified value. Fails if 618/// the value has not yet been numbered. 619uint32_t ValueTable::lookup(Value *V) const { 620 DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V); 621 assert(VI != valueNumbering.end() && "Value not numbered?"); 622 return VI->second; 623} 624 625/// clear - Remove all entries from the ValueTable 626void ValueTable::clear() { 627 valueNumbering.clear(); 628 expressionNumbering.clear(); 629 nextValueNumber = 1; 630} 631 632/// erase - Remove a value from the value numbering 633void ValueTable::erase(Value *V) { 634 valueNumbering.erase(V); 635} 636 637/// verifyRemoved - Verify that the value is removed from all internal data 638/// structures. 639void ValueTable::verifyRemoved(const Value *V) const { 640 for (DenseMap<Value*, uint32_t>::const_iterator 641 I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) { 642 assert(I->first != V && "Inst still occurs in value numbering map!"); 643 } 644} 645 646//===----------------------------------------------------------------------===// 647// GVN Pass 648//===----------------------------------------------------------------------===// 649 650namespace { 651 struct ValueNumberScope { 652 ValueNumberScope* parent; 653 DenseMap<uint32_t, Value*> table; 654 655 ValueNumberScope(ValueNumberScope* p) : parent(p) { } 656 }; 657} 658 659namespace { 660 661 class GVN : public FunctionPass { 662 bool runOnFunction(Function &F); 663 public: 664 static char ID; // Pass identification, replacement for typeid 665 explicit GVN(bool noloads = false) 666 : FunctionPass(ID), NoLoads(noloads), MD(0) { } 667 668 private: 669 bool NoLoads; 670 MemoryDependenceAnalysis *MD; 671 DominatorTree *DT; 672 673 ValueTable VN; 674 DenseMap<BasicBlock*, ValueNumberScope*> localAvail; 675 676 // List of critical edges to be split between iterations. 677 SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit; 678 679 // This transformation requires dominator postdominator info 680 virtual void getAnalysisUsage(AnalysisUsage &AU) const { 681 AU.addRequired<DominatorTree>(); 682 if (!NoLoads) 683 AU.addRequired<MemoryDependenceAnalysis>(); 684 AU.addRequired<AliasAnalysis>(); 685 686 AU.addPreserved<DominatorTree>(); 687 AU.addPreserved<AliasAnalysis>(); 688 } 689 690 // Helper fuctions 691 // FIXME: eliminate or document these better 692 bool processLoad(LoadInst* L, 693 SmallVectorImpl<Instruction*> &toErase); 694 bool processInstruction(Instruction *I, 695 SmallVectorImpl<Instruction*> &toErase); 696 bool processNonLocalLoad(LoadInst* L, 697 SmallVectorImpl<Instruction*> &toErase); 698 bool processBlock(BasicBlock *BB); 699 void dump(DenseMap<uint32_t, Value*>& d); 700 bool iterateOnFunction(Function &F); 701 Value *CollapsePhi(PHINode* p); 702 bool performPRE(Function& F); 703 Value *lookupNumber(BasicBlock *BB, uint32_t num); 704 void cleanupGlobalSets(); 705 void verifyRemoved(const Instruction *I) const; 706 bool splitCriticalEdges(); 707 }; 708 709 char GVN::ID = 0; 710} 711 712// createGVNPass - The public interface to this file... 713FunctionPass *llvm::createGVNPass(bool NoLoads) { 714 return new GVN(NoLoads); 715} 716 717INITIALIZE_PASS(GVN, "gvn", "Global Value Numbering", false, false); 718 719void GVN::dump(DenseMap<uint32_t, Value*>& d) { 720 errs() << "{\n"; 721 for (DenseMap<uint32_t, Value*>::iterator I = d.begin(), 722 E = d.end(); I != E; ++I) { 723 errs() << I->first << "\n"; 724 I->second->dump(); 725 } 726 errs() << "}\n"; 727} 728 729static bool isSafeReplacement(PHINode* p, Instruction *inst) { 730 if (!isa<PHINode>(inst)) 731 return true; 732 733 for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end(); 734 UI != E; ++UI) 735 if (PHINode* use_phi = dyn_cast<PHINode>(*UI)) 736 if (use_phi->getParent() == inst->getParent()) 737 return false; 738 739 return true; 740} 741 742Value *GVN::CollapsePhi(PHINode *PN) { 743 Value *ConstVal = PN->hasConstantValue(DT); 744 if (!ConstVal) return 0; 745 746 Instruction *Inst = dyn_cast<Instruction>(ConstVal); 747 if (!Inst) 748 return ConstVal; 749 750 if (DT->dominates(Inst, PN)) 751 if (isSafeReplacement(PN, Inst)) 752 return Inst; 753 return 0; 754} 755 756/// IsValueFullyAvailableInBlock - Return true if we can prove that the value 757/// we're analyzing is fully available in the specified block. As we go, keep 758/// track of which blocks we know are fully alive in FullyAvailableBlocks. This 759/// map is actually a tri-state map with the following values: 760/// 0) we know the block *is not* fully available. 761/// 1) we know the block *is* fully available. 762/// 2) we do not know whether the block is fully available or not, but we are 763/// currently speculating that it will be. 764/// 3) we are speculating for this block and have used that to speculate for 765/// other blocks. 766static bool IsValueFullyAvailableInBlock(BasicBlock *BB, 767 DenseMap<BasicBlock*, char> &FullyAvailableBlocks) { 768 // Optimistically assume that the block is fully available and check to see 769 // if we already know about this block in one lookup. 770 std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV = 771 FullyAvailableBlocks.insert(std::make_pair(BB, 2)); 772 773 // If the entry already existed for this block, return the precomputed value. 774 if (!IV.second) { 775 // If this is a speculative "available" value, mark it as being used for 776 // speculation of other blocks. 777 if (IV.first->second == 2) 778 IV.first->second = 3; 779 return IV.first->second != 0; 780 } 781 782 // Otherwise, see if it is fully available in all predecessors. 783 pred_iterator PI = pred_begin(BB), PE = pred_end(BB); 784 785 // If this block has no predecessors, it isn't live-in here. 786 if (PI == PE) 787 goto SpeculationFailure; 788 789 for (; PI != PE; ++PI) 790 // If the value isn't fully available in one of our predecessors, then it 791 // isn't fully available in this block either. Undo our previous 792 // optimistic assumption and bail out. 793 if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks)) 794 goto SpeculationFailure; 795 796 return true; 797 798// SpeculationFailure - If we get here, we found out that this is not, after 799// all, a fully-available block. We have a problem if we speculated on this and 800// used the speculation to mark other blocks as available. 801SpeculationFailure: 802 char &BBVal = FullyAvailableBlocks[BB]; 803 804 // If we didn't speculate on this, just return with it set to false. 805 if (BBVal == 2) { 806 BBVal = 0; 807 return false; 808 } 809 810 // If we did speculate on this value, we could have blocks set to 1 that are 811 // incorrect. Walk the (transitive) successors of this block and mark them as 812 // 0 if set to one. 813 SmallVector<BasicBlock*, 32> BBWorklist; 814 BBWorklist.push_back(BB); 815 816 do { 817 BasicBlock *Entry = BBWorklist.pop_back_val(); 818 // Note that this sets blocks to 0 (unavailable) if they happen to not 819 // already be in FullyAvailableBlocks. This is safe. 820 char &EntryVal = FullyAvailableBlocks[Entry]; 821 if (EntryVal == 0) continue; // Already unavailable. 822 823 // Mark as unavailable. 824 EntryVal = 0; 825 826 for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I) 827 BBWorklist.push_back(*I); 828 } while (!BBWorklist.empty()); 829 830 return false; 831} 832 833 834/// CanCoerceMustAliasedValueToLoad - Return true if 835/// CoerceAvailableValueToLoadType will succeed. 836static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal, 837 const Type *LoadTy, 838 const TargetData &TD) { 839 // If the loaded or stored value is an first class array or struct, don't try 840 // to transform them. We need to be able to bitcast to integer. 841 if (LoadTy->isStructTy() || LoadTy->isArrayTy() || 842 StoredVal->getType()->isStructTy() || 843 StoredVal->getType()->isArrayTy()) 844 return false; 845 846 // The store has to be at least as big as the load. 847 if (TD.getTypeSizeInBits(StoredVal->getType()) < 848 TD.getTypeSizeInBits(LoadTy)) 849 return false; 850 851 return true; 852} 853 854 855/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and 856/// then a load from a must-aliased pointer of a different type, try to coerce 857/// the stored value. LoadedTy is the type of the load we want to replace and 858/// InsertPt is the place to insert new instructions. 859/// 860/// If we can't do it, return null. 861static Value *CoerceAvailableValueToLoadType(Value *StoredVal, 862 const Type *LoadedTy, 863 Instruction *InsertPt, 864 const TargetData &TD) { 865 if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD)) 866 return 0; 867 868 const Type *StoredValTy = StoredVal->getType(); 869 870 uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy); 871 uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy); 872 873 // If the store and reload are the same size, we can always reuse it. 874 if (StoreSize == LoadSize) { 875 if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) { 876 // Pointer to Pointer -> use bitcast. 877 return new BitCastInst(StoredVal, LoadedTy, "", InsertPt); 878 } 879 880 // Convert source pointers to integers, which can be bitcast. 881 if (StoredValTy->isPointerTy()) { 882 StoredValTy = TD.getIntPtrType(StoredValTy->getContext()); 883 StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt); 884 } 885 886 const Type *TypeToCastTo = LoadedTy; 887 if (TypeToCastTo->isPointerTy()) 888 TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext()); 889 890 if (StoredValTy != TypeToCastTo) 891 StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt); 892 893 // Cast to pointer if the load needs a pointer type. 894 if (LoadedTy->isPointerTy()) 895 StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt); 896 897 return StoredVal; 898 } 899 900 // If the loaded value is smaller than the available value, then we can 901 // extract out a piece from it. If the available value is too small, then we 902 // can't do anything. 903 assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail"); 904 905 // Convert source pointers to integers, which can be manipulated. 906 if (StoredValTy->isPointerTy()) { 907 StoredValTy = TD.getIntPtrType(StoredValTy->getContext()); 908 StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt); 909 } 910 911 // Convert vectors and fp to integer, which can be manipulated. 912 if (!StoredValTy->isIntegerTy()) { 913 StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize); 914 StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt); 915 } 916 917 // If this is a big-endian system, we need to shift the value down to the low 918 // bits so that a truncate will work. 919 if (TD.isBigEndian()) { 920 Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize); 921 StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt); 922 } 923 924 // Truncate the integer to the right size now. 925 const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize); 926 StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt); 927 928 if (LoadedTy == NewIntTy) 929 return StoredVal; 930 931 // If the result is a pointer, inttoptr. 932 if (LoadedTy->isPointerTy()) 933 return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt); 934 935 // Otherwise, bitcast. 936 return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt); 937} 938 939/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can 940/// be expressed as a base pointer plus a constant offset. Return the base and 941/// offset to the caller. 942static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset, 943 const TargetData &TD) { 944 Operator *PtrOp = dyn_cast<Operator>(Ptr); 945 if (PtrOp == 0) return Ptr; 946 947 // Just look through bitcasts. 948 if (PtrOp->getOpcode() == Instruction::BitCast) 949 return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD); 950 951 // If this is a GEP with constant indices, we can look through it. 952 GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp); 953 if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr; 954 955 gep_type_iterator GTI = gep_type_begin(GEP); 956 for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E; 957 ++I, ++GTI) { 958 ConstantInt *OpC = cast<ConstantInt>(*I); 959 if (OpC->isZero()) continue; 960 961 // Handle a struct and array indices which add their offset to the pointer. 962 if (const StructType *STy = dyn_cast<StructType>(*GTI)) { 963 Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue()); 964 } else { 965 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()); 966 Offset += OpC->getSExtValue()*Size; 967 } 968 } 969 970 // Re-sign extend from the pointer size if needed to get overflow edge cases 971 // right. 972 unsigned PtrSize = TD.getPointerSizeInBits(); 973 if (PtrSize < 64) 974 Offset = (Offset << (64-PtrSize)) >> (64-PtrSize); 975 976 return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD); 977} 978 979 980/// AnalyzeLoadFromClobberingWrite - This function is called when we have a 981/// memdep query of a load that ends up being a clobbering memory write (store, 982/// memset, memcpy, memmove). This means that the write *may* provide bits used 983/// by the load but we can't be sure because the pointers don't mustalias. 984/// 985/// Check this case to see if there is anything more we can do before we give 986/// up. This returns -1 if we have to give up, or a byte number in the stored 987/// value of the piece that feeds the load. 988static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr, 989 Value *WritePtr, 990 uint64_t WriteSizeInBits, 991 const TargetData &TD) { 992 // If the loaded or stored value is an first class array or struct, don't try 993 // to transform them. We need to be able to bitcast to integer. 994 if (LoadTy->isStructTy() || LoadTy->isArrayTy()) 995 return -1; 996 997 int64_t StoreOffset = 0, LoadOffset = 0; 998 Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD); 999 Value *LoadBase = 1000 GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD); 1001 if (StoreBase != LoadBase) 1002 return -1; 1003 1004 // If the load and store are to the exact same address, they should have been 1005 // a must alias. AA must have gotten confused. 1006 // FIXME: Study to see if/when this happens. One case is forwarding a memset 1007 // to a load from the base of the memset. 1008#if 0 1009 if (LoadOffset == StoreOffset) { 1010 dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n" 1011 << "Base = " << *StoreBase << "\n" 1012 << "Store Ptr = " << *WritePtr << "\n" 1013 << "Store Offs = " << StoreOffset << "\n" 1014 << "Load Ptr = " << *LoadPtr << "\n"; 1015 abort(); 1016 } 1017#endif 1018 1019 // If the load and store don't overlap at all, the store doesn't provide 1020 // anything to the load. In this case, they really don't alias at all, AA 1021 // must have gotten confused. 1022 // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then 1023 // remove this check, as it is duplicated with what we have below. 1024 uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy); 1025 1026 if ((WriteSizeInBits & 7) | (LoadSize & 7)) 1027 return -1; 1028 uint64_t StoreSize = WriteSizeInBits >> 3; // Convert to bytes. 1029 LoadSize >>= 3; 1030 1031 1032 bool isAAFailure = false; 1033 if (StoreOffset < LoadOffset) 1034 isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset; 1035 else 1036 isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset; 1037 1038 if (isAAFailure) { 1039#if 0 1040 dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n" 1041 << "Base = " << *StoreBase << "\n" 1042 << "Store Ptr = " << *WritePtr << "\n" 1043 << "Store Offs = " << StoreOffset << "\n" 1044 << "Load Ptr = " << *LoadPtr << "\n"; 1045 abort(); 1046#endif 1047 return -1; 1048 } 1049 1050 // If the Load isn't completely contained within the stored bits, we don't 1051 // have all the bits to feed it. We could do something crazy in the future 1052 // (issue a smaller load then merge the bits in) but this seems unlikely to be 1053 // valuable. 1054 if (StoreOffset > LoadOffset || 1055 StoreOffset+StoreSize < LoadOffset+LoadSize) 1056 return -1; 1057 1058 // Okay, we can do this transformation. Return the number of bytes into the 1059 // store that the load is. 1060 return LoadOffset-StoreOffset; 1061} 1062 1063/// AnalyzeLoadFromClobberingStore - This function is called when we have a 1064/// memdep query of a load that ends up being a clobbering store. 1065static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr, 1066 StoreInst *DepSI, 1067 const TargetData &TD) { 1068 // Cannot handle reading from store of first-class aggregate yet. 1069 if (DepSI->getOperand(0)->getType()->isStructTy() || 1070 DepSI->getOperand(0)->getType()->isArrayTy()) 1071 return -1; 1072 1073 Value *StorePtr = DepSI->getPointerOperand(); 1074 uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType()); 1075 return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, 1076 StorePtr, StoreSize, TD); 1077} 1078 1079static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr, 1080 MemIntrinsic *MI, 1081 const TargetData &TD) { 1082 // If the mem operation is a non-constant size, we can't handle it. 1083 ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength()); 1084 if (SizeCst == 0) return -1; 1085 uint64_t MemSizeInBits = SizeCst->getZExtValue()*8; 1086 1087 // If this is memset, we just need to see if the offset is valid in the size 1088 // of the memset.. 1089 if (MI->getIntrinsicID() == Intrinsic::memset) 1090 return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(), 1091 MemSizeInBits, TD); 1092 1093 // If we have a memcpy/memmove, the only case we can handle is if this is a 1094 // copy from constant memory. In that case, we can read directly from the 1095 // constant memory. 1096 MemTransferInst *MTI = cast<MemTransferInst>(MI); 1097 1098 Constant *Src = dyn_cast<Constant>(MTI->getSource()); 1099 if (Src == 0) return -1; 1100 1101 GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject()); 1102 if (GV == 0 || !GV->isConstant()) return -1; 1103 1104 // See if the access is within the bounds of the transfer. 1105 int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, 1106 MI->getDest(), MemSizeInBits, TD); 1107 if (Offset == -1) 1108 return Offset; 1109 1110 // Otherwise, see if we can constant fold a load from the constant with the 1111 // offset applied as appropriate. 1112 Src = ConstantExpr::getBitCast(Src, 1113 llvm::Type::getInt8PtrTy(Src->getContext())); 1114 Constant *OffsetCst = 1115 ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset); 1116 Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1); 1117 Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy)); 1118 if (ConstantFoldLoadFromConstPtr(Src, &TD)) 1119 return Offset; 1120 return -1; 1121} 1122 1123 1124/// GetStoreValueForLoad - This function is called when we have a 1125/// memdep query of a load that ends up being a clobbering store. This means 1126/// that the store *may* provide bits used by the load but we can't be sure 1127/// because the pointers don't mustalias. Check this case to see if there is 1128/// anything more we can do before we give up. 1129static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset, 1130 const Type *LoadTy, 1131 Instruction *InsertPt, const TargetData &TD){ 1132 LLVMContext &Ctx = SrcVal->getType()->getContext(); 1133 1134 uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8; 1135 uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8; 1136 1137 IRBuilder<> Builder(InsertPt->getParent(), InsertPt); 1138 1139 // Compute which bits of the stored value are being used by the load. Convert 1140 // to an integer type to start with. 1141 if (SrcVal->getType()->isPointerTy()) 1142 SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp"); 1143 if (!SrcVal->getType()->isIntegerTy()) 1144 SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8), 1145 "tmp"); 1146 1147 // Shift the bits to the least significant depending on endianness. 1148 unsigned ShiftAmt; 1149 if (TD.isLittleEndian()) 1150 ShiftAmt = Offset*8; 1151 else 1152 ShiftAmt = (StoreSize-LoadSize-Offset)*8; 1153 1154 if (ShiftAmt) 1155 SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp"); 1156 1157 if (LoadSize != StoreSize) 1158 SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8), 1159 "tmp"); 1160 1161 return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD); 1162} 1163 1164/// GetMemInstValueForLoad - This function is called when we have a 1165/// memdep query of a load that ends up being a clobbering mem intrinsic. 1166static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, 1167 const Type *LoadTy, Instruction *InsertPt, 1168 const TargetData &TD){ 1169 LLVMContext &Ctx = LoadTy->getContext(); 1170 uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8; 1171 1172 IRBuilder<> Builder(InsertPt->getParent(), InsertPt); 1173 1174 // We know that this method is only called when the mem transfer fully 1175 // provides the bits for the load. 1176 if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) { 1177 // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and 1178 // independently of what the offset is. 1179 Value *Val = MSI->getValue(); 1180 if (LoadSize != 1) 1181 Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8)); 1182 1183 Value *OneElt = Val; 1184 1185 // Splat the value out to the right number of bits. 1186 for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) { 1187 // If we can double the number of bytes set, do it. 1188 if (NumBytesSet*2 <= LoadSize) { 1189 Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8); 1190 Val = Builder.CreateOr(Val, ShVal); 1191 NumBytesSet <<= 1; 1192 continue; 1193 } 1194 1195 // Otherwise insert one byte at a time. 1196 Value *ShVal = Builder.CreateShl(Val, 1*8); 1197 Val = Builder.CreateOr(OneElt, ShVal); 1198 ++NumBytesSet; 1199 } 1200 1201 return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD); 1202 } 1203 1204 // Otherwise, this is a memcpy/memmove from a constant global. 1205 MemTransferInst *MTI = cast<MemTransferInst>(SrcInst); 1206 Constant *Src = cast<Constant>(MTI->getSource()); 1207 1208 // Otherwise, see if we can constant fold a load from the constant with the 1209 // offset applied as appropriate. 1210 Src = ConstantExpr::getBitCast(Src, 1211 llvm::Type::getInt8PtrTy(Src->getContext())); 1212 Constant *OffsetCst = 1213 ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset); 1214 Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1); 1215 Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy)); 1216 return ConstantFoldLoadFromConstPtr(Src, &TD); 1217} 1218 1219namespace { 1220 1221struct AvailableValueInBlock { 1222 /// BB - The basic block in question. 1223 BasicBlock *BB; 1224 enum ValType { 1225 SimpleVal, // A simple offsetted value that is accessed. 1226 MemIntrin // A memory intrinsic which is loaded from. 1227 }; 1228 1229 /// V - The value that is live out of the block. 1230 PointerIntPair<Value *, 1, ValType> Val; 1231 1232 /// Offset - The byte offset in Val that is interesting for the load query. 1233 unsigned Offset; 1234 1235 static AvailableValueInBlock get(BasicBlock *BB, Value *V, 1236 unsigned Offset = 0) { 1237 AvailableValueInBlock Res; 1238 Res.BB = BB; 1239 Res.Val.setPointer(V); 1240 Res.Val.setInt(SimpleVal); 1241 Res.Offset = Offset; 1242 return Res; 1243 } 1244 1245 static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI, 1246 unsigned Offset = 0) { 1247 AvailableValueInBlock Res; 1248 Res.BB = BB; 1249 Res.Val.setPointer(MI); 1250 Res.Val.setInt(MemIntrin); 1251 Res.Offset = Offset; 1252 return Res; 1253 } 1254 1255 bool isSimpleValue() const { return Val.getInt() == SimpleVal; } 1256 Value *getSimpleValue() const { 1257 assert(isSimpleValue() && "Wrong accessor"); 1258 return Val.getPointer(); 1259 } 1260 1261 MemIntrinsic *getMemIntrinValue() const { 1262 assert(!isSimpleValue() && "Wrong accessor"); 1263 return cast<MemIntrinsic>(Val.getPointer()); 1264 } 1265 1266 /// MaterializeAdjustedValue - Emit code into this block to adjust the value 1267 /// defined here to the specified type. This handles various coercion cases. 1268 Value *MaterializeAdjustedValue(const Type *LoadTy, 1269 const TargetData *TD) const { 1270 Value *Res; 1271 if (isSimpleValue()) { 1272 Res = getSimpleValue(); 1273 if (Res->getType() != LoadTy) { 1274 assert(TD && "Need target data to handle type mismatch case"); 1275 Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(), 1276 *TD); 1277 1278 DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " " 1279 << *getSimpleValue() << '\n' 1280 << *Res << '\n' << "\n\n\n"); 1281 } 1282 } else { 1283 Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset, 1284 LoadTy, BB->getTerminator(), *TD); 1285 DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset 1286 << " " << *getMemIntrinValue() << '\n' 1287 << *Res << '\n' << "\n\n\n"); 1288 } 1289 return Res; 1290 } 1291}; 1292 1293} 1294 1295/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock, 1296/// construct SSA form, allowing us to eliminate LI. This returns the value 1297/// that should be used at LI's definition site. 1298static Value *ConstructSSAForLoadSet(LoadInst *LI, 1299 SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock, 1300 const TargetData *TD, 1301 const DominatorTree &DT, 1302 AliasAnalysis *AA) { 1303 // Check for the fully redundant, dominating load case. In this case, we can 1304 // just use the dominating value directly. 1305 if (ValuesPerBlock.size() == 1 && 1306 DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent())) 1307 return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD); 1308 1309 // Otherwise, we have to construct SSA form. 1310 SmallVector<PHINode*, 8> NewPHIs; 1311 SSAUpdater SSAUpdate(&NewPHIs); 1312 SSAUpdate.Initialize(LI->getType(), LI->getName()); 1313 1314 const Type *LoadTy = LI->getType(); 1315 1316 for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) { 1317 const AvailableValueInBlock &AV = ValuesPerBlock[i]; 1318 BasicBlock *BB = AV.BB; 1319 1320 if (SSAUpdate.HasValueForBlock(BB)) 1321 continue; 1322 1323 SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD)); 1324 } 1325 1326 // Perform PHI construction. 1327 Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent()); 1328 1329 // If new PHI nodes were created, notify alias analysis. 1330 if (V->getType()->isPointerTy()) 1331 for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i) 1332 AA->copyValue(LI, NewPHIs[i]); 1333 1334 return V; 1335} 1336 1337static bool isLifetimeStart(const Instruction *Inst) { 1338 if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst)) 1339 return II->getIntrinsicID() == Intrinsic::lifetime_start; 1340 return false; 1341} 1342 1343/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are 1344/// non-local by performing PHI construction. 1345bool GVN::processNonLocalLoad(LoadInst *LI, 1346 SmallVectorImpl<Instruction*> &toErase) { 1347 // Find the non-local dependencies of the load. 1348 SmallVector<NonLocalDepResult, 64> Deps; 1349 MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(), 1350 Deps); 1351 //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: " 1352 // << Deps.size() << *LI << '\n'); 1353 1354 // If we had to process more than one hundred blocks to find the 1355 // dependencies, this load isn't worth worrying about. Optimizing 1356 // it will be too expensive. 1357 if (Deps.size() > 100) 1358 return false; 1359 1360 // If we had a phi translation failure, we'll have a single entry which is a 1361 // clobber in the current block. Reject this early. 1362 if (Deps.size() == 1 && Deps[0].getResult().isClobber()) { 1363 DEBUG( 1364 dbgs() << "GVN: non-local load "; 1365 WriteAsOperand(dbgs(), LI); 1366 dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n'; 1367 ); 1368 return false; 1369 } 1370 1371 // Filter out useless results (non-locals, etc). Keep track of the blocks 1372 // where we have a value available in repl, also keep track of whether we see 1373 // dependencies that produce an unknown value for the load (such as a call 1374 // that could potentially clobber the load). 1375 SmallVector<AvailableValueInBlock, 16> ValuesPerBlock; 1376 SmallVector<BasicBlock*, 16> UnavailableBlocks; 1377 1378 const TargetData *TD = 0; 1379 1380 for (unsigned i = 0, e = Deps.size(); i != e; ++i) { 1381 BasicBlock *DepBB = Deps[i].getBB(); 1382 MemDepResult DepInfo = Deps[i].getResult(); 1383 1384 if (DepInfo.isClobber()) { 1385 // The address being loaded in this non-local block may not be the same as 1386 // the pointer operand of the load if PHI translation occurs. Make sure 1387 // to consider the right address. 1388 Value *Address = Deps[i].getAddress(); 1389 1390 // If the dependence is to a store that writes to a superset of the bits 1391 // read by the load, we can extract the bits we need for the load from the 1392 // stored value. 1393 if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) { 1394 if (TD == 0) 1395 TD = getAnalysisIfAvailable<TargetData>(); 1396 if (TD && Address) { 1397 int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address, 1398 DepSI, *TD); 1399 if (Offset != -1) { 1400 ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, 1401 DepSI->getOperand(0), 1402 Offset)); 1403 continue; 1404 } 1405 } 1406 } 1407 1408 // If the clobbering value is a memset/memcpy/memmove, see if we can 1409 // forward a value on from it. 1410 if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) { 1411 if (TD == 0) 1412 TD = getAnalysisIfAvailable<TargetData>(); 1413 if (TD && Address) { 1414 int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address, 1415 DepMI, *TD); 1416 if (Offset != -1) { 1417 ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI, 1418 Offset)); 1419 continue; 1420 } 1421 } 1422 } 1423 1424 UnavailableBlocks.push_back(DepBB); 1425 continue; 1426 } 1427 1428 Instruction *DepInst = DepInfo.getInst(); 1429 1430 // Loading the allocation -> undef. 1431 if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) || 1432 // Loading immediately after lifetime begin -> undef. 1433 isLifetimeStart(DepInst)) { 1434 ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, 1435 UndefValue::get(LI->getType()))); 1436 continue; 1437 } 1438 1439 if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) { 1440 // Reject loads and stores that are to the same address but are of 1441 // different types if we have to. 1442 if (S->getOperand(0)->getType() != LI->getType()) { 1443 if (TD == 0) 1444 TD = getAnalysisIfAvailable<TargetData>(); 1445 1446 // If the stored value is larger or equal to the loaded value, we can 1447 // reuse it. 1448 if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0), 1449 LI->getType(), *TD)) { 1450 UnavailableBlocks.push_back(DepBB); 1451 continue; 1452 } 1453 } 1454 1455 ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, 1456 S->getOperand(0))); 1457 continue; 1458 } 1459 1460 if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) { 1461 // If the types mismatch and we can't handle it, reject reuse of the load. 1462 if (LD->getType() != LI->getType()) { 1463 if (TD == 0) 1464 TD = getAnalysisIfAvailable<TargetData>(); 1465 1466 // If the stored value is larger or equal to the loaded value, we can 1467 // reuse it. 1468 if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){ 1469 UnavailableBlocks.push_back(DepBB); 1470 continue; 1471 } 1472 } 1473 ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD)); 1474 continue; 1475 } 1476 1477 UnavailableBlocks.push_back(DepBB); 1478 continue; 1479 } 1480 1481 // If we have no predecessors that produce a known value for this load, exit 1482 // early. 1483 if (ValuesPerBlock.empty()) return false; 1484 1485 // If all of the instructions we depend on produce a known value for this 1486 // load, then it is fully redundant and we can use PHI insertion to compute 1487 // its value. Insert PHIs and remove the fully redundant value now. 1488 if (UnavailableBlocks.empty()) { 1489 DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n'); 1490 1491 // Perform PHI construction. 1492 Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT, 1493 VN.getAliasAnalysis()); 1494 LI->replaceAllUsesWith(V); 1495 1496 if (isa<PHINode>(V)) 1497 V->takeName(LI); 1498 if (V->getType()->isPointerTy()) 1499 MD->invalidateCachedPointerInfo(V); 1500 VN.erase(LI); 1501 toErase.push_back(LI); 1502 ++NumGVNLoad; 1503 return true; 1504 } 1505 1506 if (!EnablePRE || !EnableLoadPRE) 1507 return false; 1508 1509 // Okay, we have *some* definitions of the value. This means that the value 1510 // is available in some of our (transitive) predecessors. Lets think about 1511 // doing PRE of this load. This will involve inserting a new load into the 1512 // predecessor when it's not available. We could do this in general, but 1513 // prefer to not increase code size. As such, we only do this when we know 1514 // that we only have to insert *one* load (which means we're basically moving 1515 // the load, not inserting a new one). 1516 1517 SmallPtrSet<BasicBlock *, 4> Blockers; 1518 for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i) 1519 Blockers.insert(UnavailableBlocks[i]); 1520 1521 // Lets find first basic block with more than one predecessor. Walk backwards 1522 // through predecessors if needed. 1523 BasicBlock *LoadBB = LI->getParent(); 1524 BasicBlock *TmpBB = LoadBB; 1525 1526 bool isSinglePred = false; 1527 bool allSingleSucc = true; 1528 while (TmpBB->getSinglePredecessor()) { 1529 isSinglePred = true; 1530 TmpBB = TmpBB->getSinglePredecessor(); 1531 if (TmpBB == LoadBB) // Infinite (unreachable) loop. 1532 return false; 1533 if (Blockers.count(TmpBB)) 1534 return false; 1535 1536 // If any of these blocks has more than one successor (i.e. if the edge we 1537 // just traversed was critical), then there are other paths through this 1538 // block along which the load may not be anticipated. Hoisting the load 1539 // above this block would be adding the load to execution paths along 1540 // which it was not previously executed. 1541 if (TmpBB->getTerminator()->getNumSuccessors() != 1) 1542 return false; 1543 } 1544 1545 assert(TmpBB); 1546 LoadBB = TmpBB; 1547 1548 // FIXME: It is extremely unclear what this loop is doing, other than 1549 // artificially restricting loadpre. 1550 if (isSinglePred) { 1551 bool isHot = false; 1552 for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) { 1553 const AvailableValueInBlock &AV = ValuesPerBlock[i]; 1554 if (AV.isSimpleValue()) 1555 // "Hot" Instruction is in some loop (because it dominates its dep. 1556 // instruction). 1557 if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue())) 1558 if (DT->dominates(LI, I)) { 1559 isHot = true; 1560 break; 1561 } 1562 } 1563 1564 // We are interested only in "hot" instructions. We don't want to do any 1565 // mis-optimizations here. 1566 if (!isHot) 1567 return false; 1568 } 1569 1570 // Check to see how many predecessors have the loaded value fully 1571 // available. 1572 DenseMap<BasicBlock*, Value*> PredLoads; 1573 DenseMap<BasicBlock*, char> FullyAvailableBlocks; 1574 for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) 1575 FullyAvailableBlocks[ValuesPerBlock[i].BB] = true; 1576 for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i) 1577 FullyAvailableBlocks[UnavailableBlocks[i]] = false; 1578 1579 SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit; 1580 for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB); 1581 PI != E; ++PI) { 1582 BasicBlock *Pred = *PI; 1583 if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) { 1584 continue; 1585 } 1586 PredLoads[Pred] = 0; 1587 1588 if (Pred->getTerminator()->getNumSuccessors() != 1) { 1589 if (isa<IndirectBrInst>(Pred->getTerminator())) { 1590 DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '" 1591 << Pred->getName() << "': " << *LI << '\n'); 1592 return false; 1593 } 1594 unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB); 1595 NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum)); 1596 } 1597 } 1598 if (!NeedToSplit.empty()) { 1599 toSplit.append(NeedToSplit.begin(), NeedToSplit.end()); 1600 return false; 1601 } 1602 1603 // Decide whether PRE is profitable for this load. 1604 unsigned NumUnavailablePreds = PredLoads.size(); 1605 assert(NumUnavailablePreds != 0 && 1606 "Fully available value should be eliminated above!"); 1607 if (!EnableFullLoadPRE) { 1608 // If this load is unavailable in multiple predecessors, reject it. 1609 // FIXME: If we could restructure the CFG, we could make a common pred with 1610 // all the preds that don't have an available LI and insert a new load into 1611 // that one block. 1612 if (NumUnavailablePreds != 1) 1613 return false; 1614 } 1615 1616 // Check if the load can safely be moved to all the unavailable predecessors. 1617 bool CanDoPRE = true; 1618 SmallVector<Instruction*, 8> NewInsts; 1619 for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(), 1620 E = PredLoads.end(); I != E; ++I) { 1621 BasicBlock *UnavailablePred = I->first; 1622 1623 // Do PHI translation to get its value in the predecessor if necessary. The 1624 // returned pointer (if non-null) is guaranteed to dominate UnavailablePred. 1625 1626 // If all preds have a single successor, then we know it is safe to insert 1627 // the load on the pred (?!?), so we can insert code to materialize the 1628 // pointer if it is not available. 1629 PHITransAddr Address(LI->getOperand(0), TD); 1630 Value *LoadPtr = 0; 1631 if (allSingleSucc) { 1632 LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred, 1633 *DT, NewInsts); 1634 } else { 1635 Address.PHITranslateValue(LoadBB, UnavailablePred, DT); 1636 LoadPtr = Address.getAddr(); 1637 } 1638 1639 // If we couldn't find or insert a computation of this phi translated value, 1640 // we fail PRE. 1641 if (LoadPtr == 0) { 1642 DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: " 1643 << *LI->getOperand(0) << "\n"); 1644 CanDoPRE = false; 1645 break; 1646 } 1647 1648 // Make sure it is valid to move this load here. We have to watch out for: 1649 // @1 = getelementptr (i8* p, ... 1650 // test p and branch if == 0 1651 // load @1 1652 // It is valid to have the getelementptr before the test, even if p can be 0, 1653 // as getelementptr only does address arithmetic. 1654 // If we are not pushing the value through any multiple-successor blocks 1655 // we do not have this case. Otherwise, check that the load is safe to 1656 // put anywhere; this can be improved, but should be conservatively safe. 1657 if (!allSingleSucc && 1658 // FIXME: REEVALUTE THIS. 1659 !isSafeToLoadUnconditionally(LoadPtr, 1660 UnavailablePred->getTerminator(), 1661 LI->getAlignment(), TD)) { 1662 CanDoPRE = false; 1663 break; 1664 } 1665 1666 I->second = LoadPtr; 1667 } 1668 1669 if (!CanDoPRE) { 1670 while (!NewInsts.empty()) 1671 NewInsts.pop_back_val()->eraseFromParent(); 1672 return false; 1673 } 1674 1675 // Okay, we can eliminate this load by inserting a reload in the predecessor 1676 // and using PHI construction to get the value in the other predecessors, do 1677 // it. 1678 DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n'); 1679 DEBUG(if (!NewInsts.empty()) 1680 dbgs() << "INSERTED " << NewInsts.size() << " INSTS: " 1681 << *NewInsts.back() << '\n'); 1682 1683 // Assign value numbers to the new instructions. 1684 for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) { 1685 // FIXME: We really _ought_ to insert these value numbers into their 1686 // parent's availability map. However, in doing so, we risk getting into 1687 // ordering issues. If a block hasn't been processed yet, we would be 1688 // marking a value as AVAIL-IN, which isn't what we intend. 1689 VN.lookup_or_add(NewInsts[i]); 1690 } 1691 1692 for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(), 1693 E = PredLoads.end(); I != E; ++I) { 1694 BasicBlock *UnavailablePred = I->first; 1695 Value *LoadPtr = I->second; 1696 1697 Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false, 1698 LI->getAlignment(), 1699 UnavailablePred->getTerminator()); 1700 1701 // Add the newly created load. 1702 ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred, 1703 NewLoad)); 1704 MD->invalidateCachedPointerInfo(LoadPtr); 1705 DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n'); 1706 } 1707 1708 // Perform PHI construction. 1709 Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT, 1710 VN.getAliasAnalysis()); 1711 LI->replaceAllUsesWith(V); 1712 if (isa<PHINode>(V)) 1713 V->takeName(LI); 1714 if (V->getType()->isPointerTy()) 1715 MD->invalidateCachedPointerInfo(V); 1716 VN.erase(LI); 1717 toErase.push_back(LI); 1718 ++NumPRELoad; 1719 return true; 1720} 1721 1722/// processLoad - Attempt to eliminate a load, first by eliminating it 1723/// locally, and then attempting non-local elimination if that fails. 1724bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) { 1725 if (!MD) 1726 return false; 1727 1728 if (L->isVolatile()) 1729 return false; 1730 1731 // ... to a pointer that has been loaded from before... 1732 MemDepResult Dep = MD->getDependency(L); 1733 1734 // If the value isn't available, don't do anything! 1735 if (Dep.isClobber()) { 1736 // Check to see if we have something like this: 1737 // store i32 123, i32* %P 1738 // %A = bitcast i32* %P to i8* 1739 // %B = gep i8* %A, i32 1 1740 // %C = load i8* %B 1741 // 1742 // We could do that by recognizing if the clobber instructions are obviously 1743 // a common base + constant offset, and if the previous store (or memset) 1744 // completely covers this load. This sort of thing can happen in bitfield 1745 // access code. 1746 Value *AvailVal = 0; 1747 if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) 1748 if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) { 1749 int Offset = AnalyzeLoadFromClobberingStore(L->getType(), 1750 L->getPointerOperand(), 1751 DepSI, *TD); 1752 if (Offset != -1) 1753 AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset, 1754 L->getType(), L, *TD); 1755 } 1756 1757 // If the clobbering value is a memset/memcpy/memmove, see if we can forward 1758 // a value on from it. 1759 if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) { 1760 if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) { 1761 int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(), 1762 L->getPointerOperand(), 1763 DepMI, *TD); 1764 if (Offset != -1) 1765 AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,*TD); 1766 } 1767 } 1768 1769 if (AvailVal) { 1770 DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n' 1771 << *AvailVal << '\n' << *L << "\n\n\n"); 1772 1773 // Replace the load! 1774 L->replaceAllUsesWith(AvailVal); 1775 if (AvailVal->getType()->isPointerTy()) 1776 MD->invalidateCachedPointerInfo(AvailVal); 1777 VN.erase(L); 1778 toErase.push_back(L); 1779 ++NumGVNLoad; 1780 return true; 1781 } 1782 1783 DEBUG( 1784 // fast print dep, using operator<< on instruction would be too slow 1785 dbgs() << "GVN: load "; 1786 WriteAsOperand(dbgs(), L); 1787 Instruction *I = Dep.getInst(); 1788 dbgs() << " is clobbered by " << *I << '\n'; 1789 ); 1790 return false; 1791 } 1792 1793 // If it is defined in another block, try harder. 1794 if (Dep.isNonLocal()) 1795 return processNonLocalLoad(L, toErase); 1796 1797 Instruction *DepInst = Dep.getInst(); 1798 if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) { 1799 Value *StoredVal = DepSI->getOperand(0); 1800 1801 // The store and load are to a must-aliased pointer, but they may not 1802 // actually have the same type. See if we know how to reuse the stored 1803 // value (depending on its type). 1804 const TargetData *TD = 0; 1805 if (StoredVal->getType() != L->getType()) { 1806 if ((TD = getAnalysisIfAvailable<TargetData>())) { 1807 StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(), 1808 L, *TD); 1809 if (StoredVal == 0) 1810 return false; 1811 1812 DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal 1813 << '\n' << *L << "\n\n\n"); 1814 } 1815 else 1816 return false; 1817 } 1818 1819 // Remove it! 1820 L->replaceAllUsesWith(StoredVal); 1821 if (StoredVal->getType()->isPointerTy()) 1822 MD->invalidateCachedPointerInfo(StoredVal); 1823 VN.erase(L); 1824 toErase.push_back(L); 1825 ++NumGVNLoad; 1826 return true; 1827 } 1828 1829 if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) { 1830 Value *AvailableVal = DepLI; 1831 1832 // The loads are of a must-aliased pointer, but they may not actually have 1833 // the same type. See if we know how to reuse the previously loaded value 1834 // (depending on its type). 1835 const TargetData *TD = 0; 1836 if (DepLI->getType() != L->getType()) { 1837 if ((TD = getAnalysisIfAvailable<TargetData>())) { 1838 AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), L,*TD); 1839 if (AvailableVal == 0) 1840 return false; 1841 1842 DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal 1843 << "\n" << *L << "\n\n\n"); 1844 } 1845 else 1846 return false; 1847 } 1848 1849 // Remove it! 1850 L->replaceAllUsesWith(AvailableVal); 1851 if (DepLI->getType()->isPointerTy()) 1852 MD->invalidateCachedPointerInfo(DepLI); 1853 VN.erase(L); 1854 toErase.push_back(L); 1855 ++NumGVNLoad; 1856 return true; 1857 } 1858 1859 // If this load really doesn't depend on anything, then we must be loading an 1860 // undef value. This can happen when loading for a fresh allocation with no 1861 // intervening stores, for example. 1862 if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) { 1863 L->replaceAllUsesWith(UndefValue::get(L->getType())); 1864 VN.erase(L); 1865 toErase.push_back(L); 1866 ++NumGVNLoad; 1867 return true; 1868 } 1869 1870 // If this load occurs either right after a lifetime begin, 1871 // then the loaded value is undefined. 1872 if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) { 1873 if (II->getIntrinsicID() == Intrinsic::lifetime_start) { 1874 L->replaceAllUsesWith(UndefValue::get(L->getType())); 1875 VN.erase(L); 1876 toErase.push_back(L); 1877 ++NumGVNLoad; 1878 return true; 1879 } 1880 } 1881 1882 return false; 1883} 1884 1885Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) { 1886 DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB); 1887 if (I == localAvail.end()) 1888 return 0; 1889 1890 ValueNumberScope *Locals = I->second; 1891 while (Locals) { 1892 DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num); 1893 if (I != Locals->table.end()) 1894 return I->second; 1895 Locals = Locals->parent; 1896 } 1897 1898 return 0; 1899} 1900 1901 1902/// processInstruction - When calculating availability, handle an instruction 1903/// by inserting it into the appropriate sets 1904bool GVN::processInstruction(Instruction *I, 1905 SmallVectorImpl<Instruction*> &toErase) { 1906 // Ignore dbg info intrinsics. 1907 if (isa<DbgInfoIntrinsic>(I)) 1908 return false; 1909 1910 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 1911 bool Changed = processLoad(LI, toErase); 1912 1913 if (!Changed) { 1914 unsigned Num = VN.lookup_or_add(LI); 1915 localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI)); 1916 } 1917 1918 return Changed; 1919 } 1920 1921 uint32_t NextNum = VN.getNextUnusedValueNumber(); 1922 unsigned Num = VN.lookup_or_add(I); 1923 1924 if (BranchInst *BI = dyn_cast<BranchInst>(I)) { 1925 localAvail[I->getParent()]->table.insert(std::make_pair(Num, I)); 1926 1927 if (!BI->isConditional() || isa<Constant>(BI->getCondition())) 1928 return false; 1929 1930 Value *BranchCond = BI->getCondition(); 1931 uint32_t CondVN = VN.lookup_or_add(BranchCond); 1932 1933 BasicBlock *TrueSucc = BI->getSuccessor(0); 1934 BasicBlock *FalseSucc = BI->getSuccessor(1); 1935 1936 if (TrueSucc->getSinglePredecessor()) 1937 localAvail[TrueSucc]->table[CondVN] = 1938 ConstantInt::getTrue(TrueSucc->getContext()); 1939 if (FalseSucc->getSinglePredecessor()) 1940 localAvail[FalseSucc]->table[CondVN] = 1941 ConstantInt::getFalse(TrueSucc->getContext()); 1942 1943 return false; 1944 1945 // Allocations are always uniquely numbered, so we can save time and memory 1946 // by fast failing them. 1947 } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) { 1948 localAvail[I->getParent()]->table.insert(std::make_pair(Num, I)); 1949 return false; 1950 } 1951 1952 // Collapse PHI nodes 1953 if (PHINode* p = dyn_cast<PHINode>(I)) { 1954 Value *constVal = CollapsePhi(p); 1955 1956 if (constVal) { 1957 p->replaceAllUsesWith(constVal); 1958 if (MD && constVal->getType()->isPointerTy()) 1959 MD->invalidateCachedPointerInfo(constVal); 1960 VN.erase(p); 1961 1962 toErase.push_back(p); 1963 } else { 1964 localAvail[I->getParent()]->table.insert(std::make_pair(Num, I)); 1965 } 1966 1967 // If the number we were assigned was a brand new VN, then we don't 1968 // need to do a lookup to see if the number already exists 1969 // somewhere in the domtree: it can't! 1970 } else if (Num == NextNum) { 1971 localAvail[I->getParent()]->table.insert(std::make_pair(Num, I)); 1972 1973 // Perform fast-path value-number based elimination of values inherited from 1974 // dominators. 1975 } else if (Value *repl = lookupNumber(I->getParent(), Num)) { 1976 // Remove it! 1977 VN.erase(I); 1978 I->replaceAllUsesWith(repl); 1979 if (MD && repl->getType()->isPointerTy()) 1980 MD->invalidateCachedPointerInfo(repl); 1981 toErase.push_back(I); 1982 return true; 1983 1984 } else { 1985 localAvail[I->getParent()]->table.insert(std::make_pair(Num, I)); 1986 } 1987 1988 return false; 1989} 1990 1991/// runOnFunction - This is the main transformation entry point for a function. 1992bool GVN::runOnFunction(Function& F) { 1993 if (!NoLoads) 1994 MD = &getAnalysis<MemoryDependenceAnalysis>(); 1995 DT = &getAnalysis<DominatorTree>(); 1996 VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>()); 1997 VN.setMemDep(MD); 1998 VN.setDomTree(DT); 1999 2000 bool Changed = false; 2001 bool ShouldContinue = true; 2002 2003 // Merge unconditional branches, allowing PRE to catch more 2004 // optimization opportunities. 2005 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) { 2006 BasicBlock *BB = FI; 2007 ++FI; 2008 bool removedBlock = MergeBlockIntoPredecessor(BB, this); 2009 if (removedBlock) ++NumGVNBlocks; 2010 2011 Changed |= removedBlock; 2012 } 2013 2014 unsigned Iteration = 0; 2015 2016 while (ShouldContinue) { 2017 DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n"); 2018 ShouldContinue = iterateOnFunction(F); 2019 if (splitCriticalEdges()) 2020 ShouldContinue = true; 2021 Changed |= ShouldContinue; 2022 ++Iteration; 2023 } 2024 2025 if (EnablePRE) { 2026 bool PREChanged = true; 2027 while (PREChanged) { 2028 PREChanged = performPRE(F); 2029 Changed |= PREChanged; 2030 } 2031 } 2032 // FIXME: Should perform GVN again after PRE does something. PRE can move 2033 // computations into blocks where they become fully redundant. Note that 2034 // we can't do this until PRE's critical edge splitting updates memdep. 2035 // Actually, when this happens, we should just fully integrate PRE into GVN. 2036 2037 cleanupGlobalSets(); 2038 2039 return Changed; 2040} 2041 2042 2043bool GVN::processBlock(BasicBlock *BB) { 2044 // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and 2045 // incrementing BI before processing an instruction). 2046 SmallVector<Instruction*, 8> toErase; 2047 bool ChangedFunction = false; 2048 2049 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); 2050 BI != BE;) { 2051 ChangedFunction |= processInstruction(BI, toErase); 2052 if (toErase.empty()) { 2053 ++BI; 2054 continue; 2055 } 2056 2057 // If we need some instructions deleted, do it now. 2058 NumGVNInstr += toErase.size(); 2059 2060 // Avoid iterator invalidation. 2061 bool AtStart = BI == BB->begin(); 2062 if (!AtStart) 2063 --BI; 2064 2065 for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(), 2066 E = toErase.end(); I != E; ++I) { 2067 DEBUG(dbgs() << "GVN removed: " << **I << '\n'); 2068 if (MD) MD->removeInstruction(*I); 2069 (*I)->eraseFromParent(); 2070 DEBUG(verifyRemoved(*I)); 2071 } 2072 toErase.clear(); 2073 2074 if (AtStart) 2075 BI = BB->begin(); 2076 else 2077 ++BI; 2078 } 2079 2080 return ChangedFunction; 2081} 2082 2083/// performPRE - Perform a purely local form of PRE that looks for diamond 2084/// control flow patterns and attempts to perform simple PRE at the join point. 2085bool GVN::performPRE(Function &F) { 2086 bool Changed = false; 2087 DenseMap<BasicBlock*, Value*> predMap; 2088 for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()), 2089 DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) { 2090 BasicBlock *CurrentBlock = *DI; 2091 2092 // Nothing to PRE in the entry block. 2093 if (CurrentBlock == &F.getEntryBlock()) continue; 2094 2095 for (BasicBlock::iterator BI = CurrentBlock->begin(), 2096 BE = CurrentBlock->end(); BI != BE; ) { 2097 Instruction *CurInst = BI++; 2098 2099 if (isa<AllocaInst>(CurInst) || 2100 isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) || 2101 CurInst->getType()->isVoidTy() || 2102 CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() || 2103 isa<DbgInfoIntrinsic>(CurInst)) 2104 continue; 2105 2106 // We don't currently value number ANY inline asm calls. 2107 if (CallInst *CallI = dyn_cast<CallInst>(CurInst)) 2108 if (CallI->isInlineAsm()) 2109 continue; 2110 2111 uint32_t ValNo = VN.lookup(CurInst); 2112 2113 // Look for the predecessors for PRE opportunities. We're 2114 // only trying to solve the basic diamond case, where 2115 // a value is computed in the successor and one predecessor, 2116 // but not the other. We also explicitly disallow cases 2117 // where the successor is its own predecessor, because they're 2118 // more complicated to get right. 2119 unsigned NumWith = 0; 2120 unsigned NumWithout = 0; 2121 BasicBlock *PREPred = 0; 2122 predMap.clear(); 2123 2124 for (pred_iterator PI = pred_begin(CurrentBlock), 2125 PE = pred_end(CurrentBlock); PI != PE; ++PI) { 2126 BasicBlock *P = *PI; 2127 // We're not interested in PRE where the block is its 2128 // own predecessor, or in blocks with predecessors 2129 // that are not reachable. 2130 if (P == CurrentBlock) { 2131 NumWithout = 2; 2132 break; 2133 } else if (!localAvail.count(P)) { 2134 NumWithout = 2; 2135 break; 2136 } 2137 2138 DenseMap<uint32_t, Value*>::iterator predV = 2139 localAvail[P]->table.find(ValNo); 2140 if (predV == localAvail[P]->table.end()) { 2141 PREPred = P; 2142 ++NumWithout; 2143 } else if (predV->second == CurInst) { 2144 NumWithout = 2; 2145 } else { 2146 predMap[P] = predV->second; 2147 ++NumWith; 2148 } 2149 } 2150 2151 // Don't do PRE when it might increase code size, i.e. when 2152 // we would need to insert instructions in more than one pred. 2153 if (NumWithout != 1 || NumWith == 0) 2154 continue; 2155 2156 // Don't do PRE across indirect branch. 2157 if (isa<IndirectBrInst>(PREPred->getTerminator())) 2158 continue; 2159 2160 // We can't do PRE safely on a critical edge, so instead we schedule 2161 // the edge to be split and perform the PRE the next time we iterate 2162 // on the function. 2163 unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock); 2164 if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) { 2165 toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum)); 2166 continue; 2167 } 2168 2169 // Instantiate the expression in the predecessor that lacked it. 2170 // Because we are going top-down through the block, all value numbers 2171 // will be available in the predecessor by the time we need them. Any 2172 // that weren't originally present will have been instantiated earlier 2173 // in this loop. 2174 Instruction *PREInstr = CurInst->clone(); 2175 bool success = true; 2176 for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) { 2177 Value *Op = PREInstr->getOperand(i); 2178 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op)) 2179 continue; 2180 2181 if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) { 2182 PREInstr->setOperand(i, V); 2183 } else { 2184 success = false; 2185 break; 2186 } 2187 } 2188 2189 // Fail out if we encounter an operand that is not available in 2190 // the PRE predecessor. This is typically because of loads which 2191 // are not value numbered precisely. 2192 if (!success) { 2193 delete PREInstr; 2194 DEBUG(verifyRemoved(PREInstr)); 2195 continue; 2196 } 2197 2198 PREInstr->insertBefore(PREPred->getTerminator()); 2199 PREInstr->setName(CurInst->getName() + ".pre"); 2200 predMap[PREPred] = PREInstr; 2201 VN.add(PREInstr, ValNo); 2202 ++NumGVNPRE; 2203 2204 // Update the availability map to include the new instruction. 2205 localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr)); 2206 2207 // Create a PHI to make the value available in this block. 2208 PHINode* Phi = PHINode::Create(CurInst->getType(), 2209 CurInst->getName() + ".pre-phi", 2210 CurrentBlock->begin()); 2211 for (pred_iterator PI = pred_begin(CurrentBlock), 2212 PE = pred_end(CurrentBlock); PI != PE; ++PI) { 2213 BasicBlock *P = *PI; 2214 Phi->addIncoming(predMap[P], P); 2215 } 2216 2217 VN.add(Phi, ValNo); 2218 localAvail[CurrentBlock]->table[ValNo] = Phi; 2219 2220 CurInst->replaceAllUsesWith(Phi); 2221 if (MD && Phi->getType()->isPointerTy()) 2222 MD->invalidateCachedPointerInfo(Phi); 2223 VN.erase(CurInst); 2224 2225 DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n'); 2226 if (MD) MD->removeInstruction(CurInst); 2227 CurInst->eraseFromParent(); 2228 DEBUG(verifyRemoved(CurInst)); 2229 Changed = true; 2230 } 2231 } 2232 2233 if (splitCriticalEdges()) 2234 Changed = true; 2235 2236 return Changed; 2237} 2238 2239/// splitCriticalEdges - Split critical edges found during the previous 2240/// iteration that may enable further optimization. 2241bool GVN::splitCriticalEdges() { 2242 if (toSplit.empty()) 2243 return false; 2244 do { 2245 std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val(); 2246 SplitCriticalEdge(Edge.first, Edge.second, this); 2247 } while (!toSplit.empty()); 2248 if (MD) MD->invalidateCachedPredecessors(); 2249 return true; 2250} 2251 2252/// iterateOnFunction - Executes one iteration of GVN 2253bool GVN::iterateOnFunction(Function &F) { 2254 cleanupGlobalSets(); 2255 2256 for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()), 2257 DE = df_end(DT->getRootNode()); DI != DE; ++DI) { 2258 if (DI->getIDom()) 2259 localAvail[DI->getBlock()] = 2260 new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]); 2261 else 2262 localAvail[DI->getBlock()] = new ValueNumberScope(0); 2263 } 2264 2265 // Top-down walk of the dominator tree 2266 bool Changed = false; 2267#if 0 2268 // Needed for value numbering with phi construction to work. 2269 ReversePostOrderTraversal<Function*> RPOT(&F); 2270 for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(), 2271 RE = RPOT.end(); RI != RE; ++RI) 2272 Changed |= processBlock(*RI); 2273#else 2274 for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()), 2275 DE = df_end(DT->getRootNode()); DI != DE; ++DI) 2276 Changed |= processBlock(DI->getBlock()); 2277#endif 2278 2279 return Changed; 2280} 2281 2282void GVN::cleanupGlobalSets() { 2283 VN.clear(); 2284 2285 for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator 2286 I = localAvail.begin(), E = localAvail.end(); I != E; ++I) 2287 delete I->second; 2288 localAvail.clear(); 2289} 2290 2291/// verifyRemoved - Verify that the specified instruction does not occur in our 2292/// internal data structures. 2293void GVN::verifyRemoved(const Instruction *Inst) const { 2294 VN.verifyRemoved(Inst); 2295 2296 // Walk through the value number scope to make sure the instruction isn't 2297 // ferreted away in it. 2298 for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator 2299 I = localAvail.begin(), E = localAvail.end(); I != E; ++I) { 2300 const ValueNumberScope *VNS = I->second; 2301 2302 while (VNS) { 2303 for (DenseMap<uint32_t, Value*>::const_iterator 2304 II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) { 2305 assert(II->second != Inst && "Inst still in value numbering scope!"); 2306 } 2307 2308 VNS = VNS->parent; 2309 } 2310 } 2311} 2312