GVN.cpp revision d883a9d1ed7cca9674ba7055ec45bfe2b8cb6463
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> EnableFullLoadPRE("enable-full-load-pre", cl::init(false));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
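///
/// For example (illustration, not from the original source): two structurally
/// identical instructions such as
///   %x = add i32 %a, %b
///   %y = add i32 %a, %b
/// yield equal Expressions (same opcode, type, and operand value numbers),
/// so %x and %y are assigned the same value number.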
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(Constant* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      unsigned size();
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9));

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  CallSite CS(C);
  for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;

  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

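// Illustrative note (not from the original source): a GEP such as
//   %p = getelementptr {i32, i32}* %s, i32 0, i32 1
// is flattened by create_expression into an Expression with opcode GEP,
// type i32*, and varargs [vn(%s), vn(0), vn(1)], so structurally identical
// GEPs collapse to a single value number.
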
//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumOperands() != C->getNumOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())) {
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumOperands() != C->getNumOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or :
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value.  Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
      : FunctionPass(&ID), NoLoads(noloads), MD(0) { }

  private:
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

static RegisterPass<GVN> X("gvn",
                           "Global Value Numbering");

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}

static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
/// map is actually a tri-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
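  // (A value of 2 means the optimistic assumption was never used to mark any
  // other block as available, so flipping this one entry to 0 suffices; a
  // value of 3 requires the successor walk below.)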
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}


/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
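  // For example (little-endian): an i16 load satisfied by an i64 store keeps
  // the store's low two bytes via the truncate below; on big-endian targets
  // the lshr first moves those same in-memory bytes down to the low end.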
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}


/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.  One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614.  Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getOperand(0)->getType()->isStructTy() ||
      DepSI->getOperand(0)->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}


/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
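  // Worked example (StoreSize, LoadSize, and Offset are in bytes here): with
  // StoreSize = 8, LoadSize = 2, Offset = 2, the shift is 2*8 = 16 bits on a
  // little-endian target and (8-2-2)*8 = 32 bits on a big-endian target, so
  // the same two in-memory bytes of the store feed the load in both cases.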
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}

namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

}

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI);

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same as
      // the pointer operand of the load if PHI translation occurs.  Make sure
      // to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from the
      // stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                       DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
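      // For example, a clobber by "memset(%p, 0, 16)" can still feed a load
      // of i32 from %p+4: the analysis below returns Offset = 4, and the
      // forwarded value is the splatted memset byte (here 0).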
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(LI);
    toErase.push_back(LI);
    ++NumGVNLoad;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // If we have a repl set with LI itself in it, this means we have a loop where
  // at least one of the values is LI.  Since this means that we won't be able
  // to eliminate LI even if we insert uses in the other predecessors, we will
  // end up increasing code size.  Reject this by scanning for LI.
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    if (ValuesPerBlock[i].isSimpleValue() &&
        ValuesPerBlock[i].getSimpleValue() == LI) {
      // Skip cases where LI is the only definition, even for EnableFullLoadPRE.
      if (!EnableFullLoadPRE || e == 1)
        return false;
    }
  }

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions.  We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
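  // For instance, in a diamond CFG where only one of the two predecessors of
  // the merge block stores to %p before a load of %p at the merge point, the
  // load is fully available in the storing predecessor only; load PRE can
  // then insert a copy of the load into the other predecessor and combine
  // the two values with a phi.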
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
    }
  }
  if (!NeedToSplit.empty()) {
    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
    return false;
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");
  if (!EnableFullLoadPRE) {
    // If this load is unavailable in multiple predecessors, reject it.
    // FIXME: If we could restructure the CFG, we could make a common pred with
    // all the preds that don't have an available LI and insert a new load into
    // that one block.
    if (NumUnavailablePreds != 1)
      return false;
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary.  The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getOperand(0), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getOperand(0) << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here.  We have to watch out for:
    //  @1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load @1
    // It is valid to have the getelementptr before the test, even if p can be 0,
    // as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case.  Otherwise, check that the load is safe to
    // Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors; do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                  LI->getAlignment(),
                                  UnavailablePred->getTerminator());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  VN.erase(LI);
  toErase.push_back(LI);
  ++NumPRELoad;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We could do that by recognizing if the clobber instructions are obviously
    // a common base + constant offset, and if the previous store (or memset)
    // completely covers this load.  This sort of thing can happen in bitfield
    // access code.
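    // The two cases below share one pattern: an Analyze* helper returns the
    // byte offset of the load within the clobbering store or mem-intrinsic
    // (or -1 if the bits cannot be recovered), and a Get*ValueForLoad helper
    // then extracts those bits and coerces them to the load's type.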
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can forward
    // a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *TD);
      }
    }

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
            << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }

    DEBUG(
      // fast print dep, using operator<< on instruction would be too slow
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else
        return false;
    }
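    // At this point AvailableVal has L's type.  A same-size reuse through a
    // type change looks roughly like:
    //   %A = load i32* %P
    //   %B = load float* %Q   ; %Q must-aliases %P
    // where %B is replaced by a reuse of %A via "bitcast i32 %A to float".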
    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (DepLI->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(DepLI);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading from a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load occurs right after a lifetime begin, then the loaded value
  // is undefined.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }
  }

  return false;
}

Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
    if (I != Locals->table.end())
      return I->second;
    Locals = Locals->parent;
  }

  return 0;
}

/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());

    return false;
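    // The effect of the propagation above: given "br i1 %cmp, label %T,
    // label %F", a lookup of %cmp's value number inside a single-predecessor
    // %T now yields 'true' (and 'false' inside %F), so dominated redundant
    // uses of the condition fold to a constant on later visits.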
  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }

  // Collapse PHI nodes.
  if (PHINode *p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && constVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && repl->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function &F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}

bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look at the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (*PI == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(*PI)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
          localAvail[*PI]->table.find(ValNo);
        if (predV == localAvail[*PI]->table.end()) {
          PREPred = *PI;
          ++NumWithout;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[*PI] = predV->second;
          ++NumWith;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
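      // (A critical edge goes from a block with several successors to a block
      // with several predecessors, e.g. the edge A->C in:
      //
      //    A -> B,  A -> C,  D -> C
      //
      // Hoisting the expression into A would also execute it on the A->B
      // path; splitting A->C gives a block where the insertion is safe.)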
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      ++NumGVNPRE;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));

      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI)
        Phi->addIncoming(predMap[*PI], *PI);

      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && Phi->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}

/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
        new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scopes to make sure the instruction isn't
  // ferreted away in one of them.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
           II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;
    }
  }
}