GVN.cpp revision 68c26396c07b4ad96657d4510f06f7646785278d
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <list>
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers. It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
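///
/// For example, two syntactically identical instructions reduce to the same
/// Expression (same opcode, type, and operand value numbers) and therefore
/// receive the same value number:
///
///   %x = add i32 %a, %b
///   %y = add i32 %a, %b   ; same expression, so same value number as %x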
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    /*bool operator!=(const Expression &other) const {
      return !(*this == other);
    }*/
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}
namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    // Mix the type pointer into the hash along with the opcode seeded above.
    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9)) + hash * 37;

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  CallSite CS(C);
  for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}
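// Note that create_expression records operands in syntactic order: commutative
// operations such as 'add %a, %b' and 'add %b, %a' produce distinct
// expressions here, so this GVN relies on earlier canonicalization of operand
// order (e.g. by instcombine) to catch such pairs.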
Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}
//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }
    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber! Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies. If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or :
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}
/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
        : FunctionPass(ID), NoLoads(noloads), MD(0) {
      initializeGVNPass(*PassRegistry::getPassRegistry());
    }

  private:
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const TargetData* TD;

    ValueTable VN;

    /// NumberTable - A mapping from value numbers to lists of Value*'s that
    /// have that value number. Use lookupNumber to query it.
    DenseMap<uint32_t, std::pair<Value*, void*> > NumberTable;
    BumpPtrAllocator TableAllocator;

    /// insert_table - Push a new Value onto the NumberTable list for its
    /// value number.
    void insert_table(uint32_t N, Value *V) {
      std::pair<Value*, void*>& Curr = NumberTable[N];
      if (!Curr.first) {
        Curr.first = V;
        return;
      }

      std::pair<Value*, void*>* Node =
        TableAllocator.Allocate<std::pair<Value*, void*> >();
      Node->first = V;
      Node->second = Curr.second;
      Curr.second = Node;
    }

    /// erase_table - Scan the list of values corresponding to a given value
    /// number, and remove the given value if encountered.
    void erase_table(uint32_t N, Value *V) {
      std::pair<Value*, void*>* Prev = 0;
      std::pair<Value*, void*>* Curr = &NumberTable[N];

      while (Curr->first != V) {
        Prev = Curr;
        Curr = static_cast<std::pair<Value*, void*>*>(Curr->second);
      }

      if (Prev) {
        Prev->second = Curr->second;
      } else {
        if (!Curr->second) {
          Curr->first = 0;
        } else {
          std::pair<Value*, void*>* Next =
            static_cast<std::pair<Value*, void*>*>(Curr->second);
          Curr->first = Next->first;
          Curr->second = Next->second;
        }
      }
    }
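    // Layout note for insert_table/erase_table: the head node for each value
    // number lives directly in the DenseMap slot, and later insertions are
    // chained in LIFO order behind it using nodes from TableAllocator. After
    // inserting V1 and then V2 for number N the structure looks like:
    //
    //   NumberTable[N] == (V1, node*) -> (V2, null)
    //
    // erase_table only unlinks nodes; the bump allocator reclaims them in bulk.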
    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better.
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}
/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a tri-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either. Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect. Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks. This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue; // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}
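// For example, an i64 store can feed an i8, i16, or i32 load of the same
// address, since the store covers every bit the load reads; an i16 store can
// never feed an i32 load, because the upper bits are not provided by the
// store.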
/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value. LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it. If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}
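// Worked example of the truncation path above, assuming an i64 store feeding
// an i16 load of the same address: on a little-endian target the i64 is
// truncated straight to i16; on a big-endian target it is first shifted right
// by StoreSize-LoadSize = 48 bits so that the bytes the load would read land
// in the low bits before the truncate.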
/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset. Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}
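// For example, assuming %p is an i8* and the access is
//   %q = getelementptr i8* %p, i64 12
// this returns %p with Offset advanced by 12; a struct index instead adds the
// field's offset from the TargetData struct layout.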
/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove). This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up. This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias. AA must have gotten confused.
  // FIXME: Study to see if/when this happens. One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load. In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it. We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation. Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
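// Worked example: a store of 8 bytes at offset 0 followed by a 4-byte load at
// offset 4 of the same base is fully covered, so this returns
// LoadOffset - StoreOffset = 4. If the load instead started at offset 6, it
// would spill past the stored bytes and the function returns -1.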
/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getValueOperand()->getType()->isStructTy() ||
      DepSI->getValueOperand()->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize =TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

/// AnalyzeLoadFromClobberingMemInst - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory. In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}


/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store. This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias. Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load. Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}
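// For example, assuming a little-endian target where an i64 store feeds an
// i16 load at byte Offset 2: the stored value is shifted right by 16 bits and
// truncated to i16, which yields exactly the two bytes the load reads.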
/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // See if we can constant fold a load from the constant with the offset
  // applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}
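// Illustration of the splat loop above: a memset byte of 0xAB feeding an i32
// load is widened by doubling (0xAB, then 0xABAB, then 0xABABABAB), one
// shl+or per step, before being handed to CoerceAvailableValueToLoadType.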
namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << " " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

}

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}
/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  AliasAnalysis::Location Loc = VN.getAliasAnalysis()->getLocation(LI);
  MD->getNonLocalPointerDependency(Loc, true, LI->getParent(), Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about. Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block. Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same
      // as the pointer operand of the load if PHI translation occurs. Make
      // sure to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                      DepSI->getValueOperand(),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getValueOperand()->getType() != LI->getType()) {
        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                        S->getValueOperand()));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the
      // load.
      if (LD->getType() != LI->getType()) {
        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }
  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value. Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(LI);
    toErase.push_back(LI);
    ++NumGVNLoad;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated. Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions. We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }
  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
    }
  }
  if (!NeedToSplit.empty()) {
    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
    return false;
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary. The
    // returned pointer (if non-null) is guaranteed to dominate
    // UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getPointerOperand(), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this phi translated
    // value, we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }
    // Make sure it is valid to move this load here. We have to watch out for:
    //  @1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load @1
    // It is valid to have the getelementptr before the test, even if p can
    // be 0, as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case. Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map. However, in doing so, we risk getting into
    // ordering issues. If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                  LI->getAlignment(),
                                  UnavailablePred->getTerminator());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  VN.erase(LI);
  toErase.push_back(LI);
  ++NumPRELoad;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // Find this load's memory dependency: the prior instruction that may
  // define the memory it reads.
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We could do that by recognizing if the clobbering instruction is
    // obviously a common base + constant offset, and if the previous store
    // (or memset) completely covers this load. This sort of thing can happen
    // in bitfield access code.
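    // An illustrative reading of the helpers used below: for the bitfield
    // example above, AnalyzeLoadFromClobberingStore would report Offset == 1,
    // the byte distance from %P to %B, and GetStoreValueForLoad would
    // shift/truncate the stored i32 (endianness permitting, given a
    // TargetData) to produce the i8 value the load would observe. A return
    // of -1 means the store does not fully cover the load, so no value is
    // forwarded.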
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (TD) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (TD) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,
                                            *TD);
      }
    }

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
                   << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }

    DEBUG(
      // Fast-print the dep; using operator<< on the instruction would be too
      // slow.
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getValueOperand();

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type. See if we know how to reuse the stored
    // value (depending on its type).
    if (StoredVal->getType() != L->getType()) {
      if (TD) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type. See if we know how to reuse the previously loaded value
    // (depending on its type).
    if (DepLI->getType() != L->getType()) {
      if (TD) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n"
                     << *AvailableVal << "\n" << *L << "\n\n\n");
      } else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (DepLI->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(DepLI);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading
  // an undef value. This can happen when loading from a fresh allocation with
  // no intervening stores, for example.
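  // Illustrative IR (hypothetical, not from a test case):
  //   %p = alloca i32
  //   %v = load i32* %p   ; nothing has stored to %p, so the check below
  //                       ; folds %v to undef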
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load occurs right after a lifetime begin intrinsic, the loaded
  // value is undefined.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }
  }

  return false;
}

// lookupNumber - In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that
// number, and then scan the list to find one whose block dominates the block
// in question. This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  // Each NumberTable entry is the head of a singly linked list of
  // (Value*, next) pairs; the void* second member points at the next pair.
  std::pair<Value*, void*> Vals = NumberTable[num];
  if (!Vals.first) return 0;
  Instruction *Inst = dyn_cast<Instruction>(Vals.first);
  if (!Inst) return Vals.first;
  BasicBlock *Parent = Inst->getParent();
  if (DT->dominates(Parent, BB))
    return Inst;

  std::pair<Value*, void*> *Next =
    static_cast<std::pair<Value*, void*>*>(Vals.second);
  while (Next) {
    Instruction *CurrInst = dyn_cast<Instruction>(Next->first);
    if (!CurrInst) return Next->first;

    BasicBlock *Parent = CurrInst->getParent();
    if (DT->dominates(Parent, BB))
      return CurrInst;

    Next = static_cast<std::pair<Value*, void*>*>(Next->second);
  }

  return 0;
}


/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  // If the instruction can be easily simplified then do so now in preference
  // to value numbering it. Value numbering often exposes redundancies, for
  // example if it determines that %y is equal to %x then the instruction
  // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
  if (Value *V = SimplifyInstruction(I, TD, DT)) {
    I->replaceAllUsesWith(V);
    if (MD && V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(I);
    toErase.push_back(I);
    return true;
  }

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      uint32_t Num = VN.lookup_or_add(LI);
      insert_table(Num, LI);
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  uint32_t Num = VN.lookup_or_add(I);
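  // Illustrative example (hypothetical IR) of the dispatch below: if a
  // dominating block already computed
  //   %a = add i32 %x, %y        ; assigned, say, VN 7
  // and I is
  //   %b = add i32 %x, %y        ; also maps to VN 7
  // then Num != NextNum, lookupNumber(I->getParent(), 7) returns %a, and %b
  // is replaced by %a.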
  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast-failing them.
  if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    insert_table(Num, I);
    return false;
  }

  if (isa<PHINode>(I)) {
    insert_table(Num, I);

    // If the number we were assigned was a brand new VN, then we don't
    // need to do a lookup to see if the number already exists
    // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    insert_table(Num, I);

    // Perform fast-path value-number-based elimination of values inherited
    // from dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && repl->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    insert_table(Num, I);
  }

  return false;
}

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function &F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  TD = getAnalysisIfAvailable<TargetData>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something. PRE can move
  // computations into blocks where they become fully redundant. Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}


bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();
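    // Illustrative note on the dance below: erasing the instruction BI points
    // at would invalidate BI. We therefore back BI up one slot first (or
    // remember that it sat at begin()), erase the dead instructions, and then
    // re-advance BI so iteration resumes at the instruction following the
    // ones just removed.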
    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVector<Instruction*, 8>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      // We don't currently value number ANY inline asm calls.
      if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
        if (CallI->isInlineAsm())
          continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look at the predecessors for PRE opportunities. We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other. We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (P == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!DT->dominates(&F.getEntryBlock(), P)) {
          NumWithout = 2;
          break;
        }

        Value *predV = lookupNumber(P, ValNo);
        if (predV == 0) {
          PREPred = P;
          ++NumWithout;
        } else if (predV == CurInst) {
          NumWithout = 2;
        } else {
          predMap[P] = predV;
          ++NumWith;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
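      // (Illustrative: an edge A->B is critical when A has multiple
      // successors and B has multiple predecessors. Code placed "on" that
      // edge must otherwise go either in A, where it would also run along
      // A's other paths, or in B, where it would also run for B's other
      // preds; splitting gives the edge its own block to hold the code.)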
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them. Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor. This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      ++NumGVNPRE;

      // Update the availability map to include the new instruction.
      insert_table(ValNo, PREInstr);

      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        Phi->addIncoming(predMap[P], P);
      }

      VN.add(Phi, ValNo);
      insert_table(ValNo, Phi);

      CurInst->replaceAllUsesWith(Phi);
      if (MD && Phi->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);
      erase_table(ValNo, CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}

/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
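  // (Illustrative note: a reverse post-order walk visits each block after
  // all of its CFG predecessors, loops aside, which full
  // PHI-construction-based numbering would rely on; the dominator-tree walk
  // below only guarantees that dominators are visited first.)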
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();
  NumberTable.clear();
  TableAllocator.Reset();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<uint32_t, std::pair<Value*, void*> >::const_iterator
         I = NumberTable.begin(), E = NumberTable.end(); I != E; ++I) {
    const std::pair<Value*, void*> *Node = &I->second;
    assert(Node->first != Inst && "Inst still in value numbering scope!");

    while (Node->second) {
      Node = static_cast<std::pair<Value*, void*>*>(Node->second);
      assert(Node->first != Inst && "Inst still in value numbering scope!");
    }
  }
}