GVN.cpp revision ce665bd2e2b581ab0858d1afe359192bac96b868
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
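///
/// For example (illustrative), given the IR
///   %x = add i32 %a, %b
///   %y = add i32 %a, %b
/// both instructions produce equal Expressions below, so lookup_or_add
/// assigns %x and %y the same value number.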
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    /*bool operator!=(const Expression &other) const {
      return !(*this == other);
    }*/
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    // Mix in the type so the opcode seed above is not discarded.
    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9)) +
           hash * 37;

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  CallSite CS(C);
  for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}
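// Note that create_expression does not canonicalize commutative operations,
// so e.g. (illustrative)
//   %x = add i32 %a, %b
//   %y = add i32 %b, %a
// produce different Expressions (operands in a different order) and thus
// receive different value numbers.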
//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
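      // For the candidate below to be reusable, its block must properly
      // dominate C's block; e.g. (illustrative) a readonly call in the entry
      // block can serve an identical call in any block the entry dominates.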
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or :
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value.  Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
      : FunctionPass(ID), NoLoads(noloads), MD(0) { }

  private:
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
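// Passing NoLoads=true builds a GVN that skips load elimination entirely, in
// which case MemoryDependenceAnalysis is not required (see getAnalysisUsage).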
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

INITIALIZE_PASS(GVN, "gvn", "Global Value Numbering", false, false)

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}

static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(*UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
/// map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
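  // BBVal distinguishes the two speculative states: 2 means we speculated on
  // this block and nothing else depended on it yet; 3 means other blocks were
  // marked available based on this speculation and must be revisited below.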
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}


/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
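  // For example (illustrative), an i64 store feeding an i32 load: the stored
  // value is shifted so the interesting bits land at the low end (the shift is
  // only needed on big-endian targets) and is then truncated from i64 to i32.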
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}


/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.  One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614.  Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load and then merge the bits in), but this seems unlikely
  // to be valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
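  // (That is, a store whose operand is a struct or array; such a value cannot
  // be bitcast to an integer for partial-value extraction.)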
  if (DepSI->getOperand(0)->getType()->isStructTy() ||
      DepSI->getOperand(0)->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is a memset, we just need to see if the offset is valid in the
  // size of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}


/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
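  // E.g. (illustrative) for a 4-byte store feeding a 1-byte load at Offset 1:
  // little-endian shifts right by 1*8 = 8 bits, big-endian by
  // (4-1-1)*8 = 16 bits, before the truncate below.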
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // See if we can constant fold a load from the constant with the offset
  // applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}

namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

}

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same
      // as the pointer operand of the load if PHI translation occurs.  Make
      // sure to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                       DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
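      // E.g. (illustrative) a load of an i16 at offset 2 inside a
      // 'memset(p, 0, 8)' region can be satisfied by the splatted byte value.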
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger than or equal to the loaded value, we
        // can reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the
      // load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the previously loaded value is larger than or equal to this
        // load, we can reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(LI);
    toErase.push_back(LI);
    ++NumGVNLoad;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.
  // As such, we only do this when we know that we only have to insert *one*
  // load (which means we're basically moving the load, not inserting a new
  // one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated.  Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions.  We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
    }
  }
  if (!NeedToSplit.empty()) {
    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
    return false;
  }

  // Decide whether PRE is profitable for this load.
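  // The check below is deliberately conservative: we only proceed when
  // exactly one predecessor lacks the value, so the inserted load effectively
  // moves the original load rather than duplicating it.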
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary.  The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getOperand(0), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this phi translated
    // value, we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getOperand(0) << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here.  We have to watch out for:
    //  @1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load @1
    // It is valid to have the getelementptr before the test, even if p can
    // be 0, as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case.  Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                  LI->getAlignment(),
                                  UnavailablePred->getTerminator());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  VN.erase(LI);
  toErase.push_back(LI);
  ++NumPRELoad;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //    store i32 123, i32* %P
    //    %A = bitcast i32* %P to i8*
    //    %B = gep i8* %A, i32 1
    //    %C = load i8* %B
    //
    // We can forward the value from the store to the load in cases like this
    // by recognizing that the clobbering instructions write to an obvious
    // common base + constant offset, and that the previous store (or memset)
    // completely covers this load.  This sort of thing can happen in bitfield
    // access code.
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(),
                                            L, *TD);
      }
    }
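
    // As a hypothetical example of the memset case: after something like
    //   call void @llvm.memset.i64(i8* %P, i8 1, i64 8, i32 1)
    // a clobbered i16 load from within those 8 bytes sees the fill byte
    // replicated across the load width, so AvailVal can be materialized as
    // the constant 0x0101.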

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
                   << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }

    DEBUG(
      // Fast-print the dep; using operator<< on the instruction would be too
      // slow.
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else {
        return false;
      }
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else {
        return false;
      }
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (DepLI->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(DepLI);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading from a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }
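
  // E.g. (hypothetical IR): with no intervening store,
  //   %a = alloca i32
  //   %x = load i32* %a
  // reads uninitialized memory, so %x folds to undef above.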

  // If this load occurs right after a lifetime begin, the loaded value is
  // undefined.
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }
  }

  return false;
}

Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
    if (I != Locals->table.end())
      return I->second;
    Locals = Locals->parent;
  }

  return 0;
}


/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    // A conditional branch makes its condition's value known along each
    // outgoing edge: true in the true successor and false in the false
    // successor, provided the successor has no other predecessors.
    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(TrueSucc->getContext());

    return false;

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast-failing them.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }

  // Collapse PHI nodes.
  if (PHINode* p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && constVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
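
  // For illustration (hypothetical IR): if a dominating block computed
  //   %a = add i32 %x, %y
  // and this block computes
  //   %b = add i32 %x, %y
  // both adds receive the same value number, so the lookup below finds %a in
  // an enclosing scope and %b can be replaced outright.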

  // Perform fast-path value-number-based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && repl->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}


bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}
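
// For orientation, a sketch of the diamond shape that performPRE (below)
// targets (hypothetical IR):
//
//   entry:
//     br i1 %c, label %then, label %else
//   then:
//     %t = add i32 %x, %y
//     br label %join
//   else:
//     br label %join
//   join:
//     %j = add i32 %x, %y          ; partially redundant: PRE clones the add
//                                  ; into %else and replaces %j with a PHI.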

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      // We don't currently value number ANY inline asm calls.
      if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
        if (CallI->isInlineAsm())
          continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look at the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (P == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(P)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
          localAvail[P]->table.find(ValNo);
        if (predV == localAvail[P]->table.end()) {
          PREPred = P;
          ++NumWithout;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[P] = predV->second;
          ++NumWith;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }
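
      // (Hypothetical example: for a clone of "%j = add i32 %x, %y", %x and
      // %y must each map, via their value numbers, to definitions available
      // in PREPred; if one does not, we fail out below.)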

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        delete PREInstr;
        DEBUG(verifyRemoved(PREInstr));
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      ++NumGVNPRE;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));

      // Create a PHI to make the value available in this block.
      PHINode* Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        Phi->addIncoming(predMap[P], P);
      }

      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && Phi->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}

/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Seed the per-block availability scopes: each block's scope chains to the
  // scope of its immediate dominator.
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
        new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value-number scopes to make sure the instruction isn't
  // ferreted away in one of them.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
           II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;
    }
  }
}