GVN.cpp revision fcf8d7c73d7517e26f9f9d1a9af22ad4314e4984
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <list>
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    /*bool operator!=(const Expression &other) const {
      return !(*this == other);
    }*/
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

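// A quick sketch of how this table behaves (illustrative IR, not from any
// particular test case): given
//
//   %x = add i32 %a, %b
//   %y = add i32 %a, %b
//
// both adds build Expression{ADD, i32, [vn(%a), vn(%b)]}, so lookup_or_add
// hands %y the value number already assigned to %x.  Operands are compared
// positionally, so this table alone does not unify "add %a, %b" with
// "add %b, %a".
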
namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    // Fold the type into the running hash; without the "+ hash * 37" term the
    // initial opcode value would be dead and never participate in the hash.
    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9)) +
           hash * 37;

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  CallSite CS(C);
  for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

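// Note on the aggregate cases above (a sketch with hypothetical values):
// ExtractValueInst and InsertValueInst indices are literal constants, so they
// are pushed into varargs directly rather than being value-numbered.  E.g.
//
//   %v = extractvalue { i32, i64 } %agg, 1
//
// becomes Expression{EXTRACTVALUE, i64, [vn(%agg), 1]}; the fixed operand
// positions keep literal indices from being confused with value numbers.
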
//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or :
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

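// An end-to-end sketch of the readonly-call path in lookup_or_add_call
// (hypothetical IR; @f is assumed to only read memory):
//
//   %r1 = call i32 @f(i32 %x)
//   %r2 = call i32 @f(i32 %x)    ; no intervening may-write to memory
//
// memdep reports the second call as a Def dependence on the first; the
// argument value numbers match, so %r2 receives %r1's number and becomes a
// candidate for elimination.  A clobbering store between the two calls would
// instead force a fresh value number.
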
/// lookup - Returns the value number of the specified value.  Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
        : FunctionPass(ID), NoLoads(noloads), MD(0) {
      initializeGVNPass(*PassRegistry::getPassRegistry());
    }

  private:
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const TargetData* TD;

    ValueTable VN;

    /// NumberTable - A mapping from value numbers to lists of Value*'s that
    /// have that value number.  Use lookupNumber to query it.
    DenseMap<uint32_t, std::pair<Value*, void*> > NumberTable;
    BumpPtrAllocator TableAllocator;

    /// insert_table - Push a new Value onto the NumberTable list for its
    /// value number.
    void insert_table(uint32_t N, Value *V) {
      std::pair<Value*, void*>& Curr = NumberTable[N];
      if (!Curr.first) {
        Curr.first = V;
        return;
      }

      std::pair<Value*, void*>* Node =
        TableAllocator.Allocate<std::pair<Value*, void*> >();
      Node->first = V;
      Node->second = Curr.second;
      Curr.second = Node;
    }

    /// erase_table - Scan the list of values corresponding to a given value
    /// number, and remove the given value if encountered.
    void erase_table(uint32_t N, Value *V) {
      std::pair<Value*, void*>* Prev = 0;
      std::pair<Value*, void*>* Curr = &NumberTable[N];

      while (Curr->first != V) {
        Prev = Curr;
        Curr = static_cast<std::pair<Value*, void*>*>(Curr->second);
      }

      if (Prev) {
        Prev->second = Curr->second;
      } else {
        if (!Curr->second) {
          Curr->first = 0;
        } else {
          std::pair<Value*, void*>* Next =
            static_cast<std::pair<Value*, void*>*>(Curr->second);
          Curr->first = Next->first;
          Curr->second = Next->second;
        }
      }
    }

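    // Shape of NumberTable, as implied by the two helpers above: each bucket
    // is a (Value*, next) cons cell with the link type-erased to void*.  After
    // insert_table(N, A), insert_table(N, B), insert_table(N, C), the bucket
    // for N looks like
    //
    //   NumberTable[N]: (A) -> (C) -> (B)
    //
    // i.e. later values are spliced in right behind the head, and the
    // bump-pointer-allocated nodes are only reclaimed when the allocator is.
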
    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks.
/// This map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this
// and used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them
  // as 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}

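// For instance (hypothetical types): an i64 store can feed an i32 load
// (64 >= 32), but an i32 store can never feed an i64 load, and first-class
// aggregates such as {i32, i32} are rejected outright because they cannot be
// bitcast to an integer for slicing.
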
/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory,
/// and then a load from a must-aliased pointer of a different type, try to
/// coerce the stored value.  LoadedTy is the type of the load we want to
/// replace and InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the
  // low bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

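// A worked instance of the "smaller load" path above (hypothetical values):
// with an i64 available and an i16 wanted, StoreSize = 64 and LoadSize = 16.
// On a little-endian target the function simply emits
//
//   %t = trunc i64 %stored to i16
//
// while a big-endian target first shifts right by StoreSize - LoadSize = 48
// bits so that the same trunc picks up the correct bytes.
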
/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}


/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits
/// used by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.  One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, and
  // AA must have gotten confused.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to
  // be valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getValueOperand()->getType()->isStructTy() ||
      DepSI->getValueOperand()->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}

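// Putting the analyses above together, a memset example (illustrative IR):
//
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 %ch, i64 16, i32 1, i1 false)
//   %q = bitcast i8* %p to i32*
//   %g = getelementptr i32* %q, i32 1
//   %x = load i32* %g
//
// Both pointers decompose to base %p with byte offsets 0 and 4; the 4-byte
// load lies entirely inside the 16-byte write, so the analysis returns 4 and
// GetMemInstValueForLoad can later splat %ch to satisfy the load.
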
/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.
  // Convert to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}

namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

}

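// How this record is consumed (a sketch; block names are hypothetical): if a
// load of i32 in block %join sees "store i32 %v" in pred %a and a covering
// memset in pred %b, the collected list is roughly
//
//   { (%a, SimpleVal %v, Offset 0), (%b, MemIntrin memset, Offset N) }
//
// and MaterializeAdjustedValue emits any coercion or byte extraction at each
// block's terminator before SSA construction stitches the values together.
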
/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  AliasAnalysis::Location Loc = VN.getAliasAnalysis()->getLocation(LI);
  MD->getNonLocalPointerDependency(Loc, true, LI->getParent(), Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same
      // as the pointer operand of the load if PHI translation occurs.  Make
      // sure to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                       DepSI->getValueOperand(),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getValueOperand()->getType() != LI->getType()) {
        // If the stored value is larger than or equal to the loaded value, we
        // can reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                         S->getValueOperand()));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the
      // load.
      if (LD->getType() != LI->getType()) {
        // If the loaded value is larger than or equal to the value we want,
        // we can reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(LI);
    toErase.push_back(LI);
    ++NumGVNLoad;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated.  Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions.  We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
    }
  }
  if (!NeedToSplit.empty()) {
    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
    return false;
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary.
    // The returned pointer (if non-null) is guaranteed to dominate
    // UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getPointerOperand(), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this phi translated
    // value, we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Check if the load can safely be moved to all the unavailable
  // predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary.
    // The returned pointer (if non-null) is guaranteed to dominate
    // UnavailablePred.

    // If all preds have a single successor, then we know it is safe to
    // insert the load on the pred (?!?), so we can insert code to
    // materialize the pointer if it is not available.
    PHITransAddr Address(LI->getPointerOperand(), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this phi translated
    // value, we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                   << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here.  We have to watch out
    // for:
    //  @1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load @1
    // It is valid to have the getelementptr before the test, even if p can
    // be 0, as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case.  Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the
  // predecessor and using PHI construction to get the value in the other
  // predecessors, do it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                  LI->getAlignment(),
                                  UnavailablePred->getTerminator());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  VN.erase(LI);
  toErase.push_back(LI);
  ++NumPRELoad;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // Find what this load locally depends on: a prior store or load of the
  // same pointer, or a clobber.
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We can handle this by recognizing that the clobber instructions are
    // obviously a common base + constant offset apart, and that the previous
    // store (or memset) completely covers this load.  This sort of thing can
    // happen in bitfield access code.
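    // (For illustration, assuming a little-endian target: in the example
    //  above the store covers the load at constant offset 1, so %C can be
    //  satisfied by the second byte of the stored value -- materialized by
    //  the helpers below as a shift-and-truncate of i32 123 instead of a
    //  load.)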
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (TD) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (TD) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(),
                                            L, *TD);
      }
    }

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
                   << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }

    DEBUG(
      // fast print dep, using operator<< on instruction would be too slow
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getValueOperand();

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    if (StoredVal->getType() != L->getType()) {
      if (TD) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n'
                     << *StoredVal << '\n' << *L << "\n\n\n");
      } else {
        return false;
      }
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually
    // have the same type.  See if we know how to reuse the previously loaded
    // value (depending on its type).
    if (DepLI->getType() != L->getType()) {
      if (TD) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n"
                     << *AvailableVal << "\n" << *L << "\n\n\n");
      } else {
        return false;
      }
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (DepLI->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(DepLI);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }
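  // (For illustration of the coercion above, with an assumed must-alias
  //  pair: if "%a = load i32* %P" is the dependency of "%b = load float* %Q",
  //  the types differ but the sizes match, so CoerceAvailableValueToLoadType
  //  can satisfy %b with a bitcast of %a instead of keeping the second load.)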
  // If this load really doesn't depend on anything, then we must be loading
  // an undef value.  This can happen when loading from a fresh allocation
  // with no intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load occurs right after a lifetime begin, then the loaded value
  // is undefined.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }
  }

  return false;
}

// lookupNumber - In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that
// number, and then scan the list to find one whose block dominates the block
// in question.  This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  std::pair<Value*, void*> Vals = NumberTable[num];
  if (!Vals.first) return 0;
  Instruction *Inst = dyn_cast<Instruction>(Vals.first);
  if (!Inst) return Vals.first;
  BasicBlock *Parent = Inst->getParent();
  if (DT->dominates(Parent, BB))
    return Inst;

  std::pair<Value*, void*> *Next =
    static_cast<std::pair<Value*, void*>*>(Vals.second);
  while (Next) {
    Instruction *CurrInst = dyn_cast<Instruction>(Next->first);
    if (!CurrInst) return Next->first;

    BasicBlock *Parent = CurrInst->getParent();
    if (DT->dominates(Parent, BB))
      return CurrInst;

    Next = static_cast<std::pair<Value*, void*>*>(Next->second);
  }

  return 0;
}
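// A note on the underlying structure (inferred from the walk above and from
// verifyRemoved below): NumberTable maps a value number to the head of an
// intrusive singly linked list of (Value*, next) pairs, with the nodes
// presumably carved out of TableAllocator.  lookupNumber simply returns the
// first entry on the chain whose defining block dominates BB.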
/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  // If the instruction can be easily simplified then do so now in preference
  // to value numbering it.  Value numbering often exposes redundancies, for
  // example if it determines that %y is equal to %x then the instruction
  // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
  if (Value *V = SimplifyInstruction(I, TD, DT)) {
    I->replaceAllUsesWith(V);
    if (MD && V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(I);
    toErase.push_back(I);
    return true;
  }

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      insert_table(Num, LI);
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
  if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    insert_table(Num, I);
    return false;
  }

  if (isa<PHINode>(I)) {
    insert_table(Num, I);

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    insert_table(Num, I);

  // Perform fast-path value-number based elimination of values inherited
  // from dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && repl->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    insert_table(Num, I);
  }

  return false;
}

/// runOnFunction - This is the main transformation entry point for a
/// function.
bool GVN::runOnFunction(Function &F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  TD = getAnalysisIfAvailable<TargetData>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}


bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function
  // (and incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
           E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join
/// point.
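///
/// For illustration only (hypothetical blocks and values):
///
///          Entry
///         /     \
///     Left       Right
///   %t = add      (no add)
///   i32 %a, %b
///         \     /
///          Join:  %z = add i32 %a, %b
///
/// Here NumWith == 1 (Left already has the value) and NumWithout == 1
/// (Right lacks it), so the add is cloned into Right and %z is replaced by
/// a phi of the two available copies.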
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      // We don't currently value number ANY inline asm calls.
      if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
        if (CallI->isInlineAsm())
          continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look at the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (P == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!DT->dominates(&F.getEntryBlock(), P)) {
          NumWithout = 2;
          break;
        }

        Value *predV = lookupNumber(P, ValNo);
        if (predV == 0) {
          PREPred = P;
          ++NumWithout;
        } else if (predV == CurInst) {
          NumWithout = 2;
        } else {
          predMap[P] = predV;
          ++NumWith;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      ++NumGVNPRE;

      // Update the availability map to include the new instruction.
      insert_table(ValNo, PREInstr);

      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        Phi->addIncoming(predMap[P], P);
      }

      VN.add(Phi, ValNo);
      insert_table(ValNo, Phi);

      CurInst->replaceAllUsesWith(Phi);
      if (MD && Phi->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);
      erase_table(ValNo, CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}

/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();
  NumberTable.clear();
  TableAllocator.Reset();
}

/// verifyRemoved - Verify that the specified instruction does not occur in
/// our internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<uint32_t, std::pair<Value*, void*> >::const_iterator
       I = NumberTable.begin(), E = NumberTable.end(); I != E; ++I) {
    const std::pair<Value*, void*> *Node = &I->second;
    assert(Node->first != Inst && "Inst still in value numbering scope!");

    while (Node->second) {
      Node = static_cast<const std::pair<Value*, void*>*>(Node->second);
      assert(Node->first != Inst && "Inst still in value numbering scope!");
    }
  }
}