GVN.cpp revision 03f17da4dade799132a3224f194779d342a96722
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <cstdio>
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
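///
/// Illustrative sketch (hypothetical IR, not taken from this file): given
///   %a = add i32 %x, %y
///   %b = add i32 %x, %y
/// both adds map to the same Expression and so receive the same value
/// number; that is what lets GVN replace all uses of %b with %a and delete
/// %b as fully redundant.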
namespace {
  struct Expression {
    enum ExpressionOpcode { ADD, FADD, SUB, FSUB, MUL, FMUL,
                            UDIV, SDIV, FDIV, UREM, SREM,
                            FREM, SHL, LSHR, ASHR, AND, OR, XOR, ICMPEQ,
                            ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
                            ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
                            FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
                            FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
                            FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
                            SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI,
                            FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT,
                            PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, CONSTANT,
                            INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(BinaryOperator* BO);
      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression::ExpressionOpcode getOpcode(CastInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(Constant* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      unsigned size();
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }
  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9)) +
           hash * 37;

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
  static bool isPod() { return true; }
};
}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//
Expression::ExpressionOpcode ValueTable::getOpcode(BinaryOperator* BO) {
  switch(BO->getOpcode()) {
  default: // THIS SHOULD NEVER HAPPEN
    llvm_unreachable("Binary operator with unknown opcode?");
  case Instruction::Add:  return Expression::ADD;
  case Instruction::FAdd: return Expression::FADD;
  case Instruction::Sub:  return Expression::SUB;
  case Instruction::FSub: return Expression::FSUB;
  case Instruction::Mul:  return Expression::MUL;
  case Instruction::FMul: return Expression::FMUL;
  case Instruction::UDiv: return Expression::UDIV;
  case Instruction::SDiv: return Expression::SDIV;
  case Instruction::FDiv: return Expression::FDIV;
  case Instruction::URem: return Expression::UREM;
  case Instruction::SRem: return Expression::SREM;
  case Instruction::FRem: return Expression::FREM;
  case Instruction::Shl:  return Expression::SHL;
  case Instruction::LShr: return Expression::LSHR;
  case Instruction::AShr: return Expression::ASHR;
  case Instruction::And:  return Expression::AND;
  case Instruction::Or:   return Expression::OR;
  case Instruction::Xor:  return Expression::XOR;
  }
}

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}
Expression::ExpressionOpcode ValueTable::getOpcode(CastInst* C) {
  switch(C->getOpcode()) {
  default: // THIS SHOULD NEVER HAPPEN
    llvm_unreachable("Cast operator with unknown opcode?");
  case Instruction::Trunc:    return Expression::TRUNC;
  case Instruction::ZExt:     return Expression::ZEXT;
  case Instruction::SExt:     return Expression::SEXT;
  case Instruction::FPToUI:   return Expression::FPTOUI;
  case Instruction::FPToSI:   return Expression::FPTOSI;
  case Instruction::UIToFP:   return Expression::UITOFP;
  case Instruction::SIToFP:   return Expression::SITOFP;
  case Instruction::FPTrunc:  return Expression::FPTRUNC;
  case Instruction::FPExt:    return Expression::FPEXT;
  case Instruction::PtrToInt: return Expression::PTRTOINT;
  case Instruction::IntToPtr: return Expression::INTTOPTR;
  case Instruction::BitCast:  return Expression::BITCAST;
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = getOpcode(BO);

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}
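// Illustrative sketch (hypothetical IR, not from this file): for
//   %p = getelementptr i32* %base, i64 %i
// the expression built below records the GEP opcode, the result type, and
// the value numbers of %base and %i, so structurally identical GEPs map to
// the same value number.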
Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumOperands() != C->getNumOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 1; i < C->getNumOperands(); ++i) {
        uint32_t c_vn = lookup_or_add(C->getOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }
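    // Illustrative sketch (hypothetical IR, not from this file): for a
    // readonly call such as
    //   %r1 = call i32 @f(i32 %x)
    //   ...
    //   %r2 = call i32 @f(i32 %x)
    // with no intervening write to memory, the scan below tries to prove
    // that a single identical call dominates C, so %r2 can reuse %r1's
    // value number.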
    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumOperands() != C->getNumOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 1; i < C->getNumOperands(); ++i) {
      uint32_t c_vn = lookup_or_add(C->getOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value.  Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool nopre = false, bool noloads = false)
      : FunctionPass(&ID), NoPRE(nopre), NoLoads(noloads), MD(0) { }

  private:
    bool NoPRE;
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better.
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
  };

  char GVN::ID = 0;
}
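// Illustrative usage sketch (hypothetical client code, not part of this
// file):
//   PassManager PM;
//   PM.add(createGVNPass());   // PRE and load PRE are enabled by default.
//   PM.run(M);                 // M is the Module being optimized.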
// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoPRE, bool NoLoads) {
  return new GVN(NoPRE, NoLoads);
}

static RegisterPass<GVN> X("gvn",
                           "Global Value Numbering");

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  printf("{\n");
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
         E = d.end(); I != E; ++I) {
    printf("%u\n", I->first);
    I->second->dump();
  }
  printf("}\n");
}

static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks.
/// Each block in this map is in one of four states:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;
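// Illustrative sketch (hypothetical CFG, not from this file): in a diamond
// whose merge block M has predecessors A and B, if the value is known to be
// available in A but B has not been visited yet, M is first marked 2
// (speculatively available); if the walk of B's predecessors then fails, we
// reach SpeculationFailure below and the speculative marks are undone.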
// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  while (!BBWorklist.empty()) {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  }

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy) ||
      isa<StructType>(StoredVal->getType()) ||
      isa<ArrayType>(StoredVal->getType()))
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}


/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (isa<PointerType>(StoredValTy) && isa<PointerType>(LoadedTy)) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (isa<PointerType>(StoredValTy)) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (isa<PointerType>(TypeToCastTo))
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (isa<PointerType>(LoadedTy))
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }
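  // Illustrative sketch (hypothetical IR, not from this file): reusing a
  // stored i32 to satisfy a must-aliased load of i8 takes the path below:
  // the i32 is shifted right on big-endian targets and then truncated to i8.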
  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (isa<PointerType>(StoredValTy)) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!isa<IntegerType>(StoredValTy)) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (isa<PointerType>(LoadedTy))
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}
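// Illustrative sketch (hypothetical IR, not from this file): for
//   %p = getelementptr { i32, i32 }* %base, i32 0, i32 1
// with 4-byte i32 fields, this returns %base and sets Offset to 4.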
/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy))
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.
  if (LoadOffset == StoreOffset) {
#if 0
    errs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n"
           << "Load Offs  = " << LoadOffset << " - " << *L << "\n\n";
    abort();
#endif
    return -1;
  }

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614.  Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset) {
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  } else {
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
  }
  if (isAAFailure) {
#if 0
    errs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *L->getPointerOperand() << "\n"
           << "Load Offs  = " << LoadOffset << " - " << *L << "\n\n";
    errs() << "'" << L->getParent()->getParent()->getName() << "'"
           << *L->getParent();
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
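// For example (hypothetical offsets, not from this file): a 4-byte store at
// offset 0 from a common base completely covers a 1-byte load at offset 2
// from that base, so this returns 2; a load that extends past the stored
// bytes returns -1.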
/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(LoadInst *L, StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (isa<StructType>(DepSI->getOperand(0)->getType()) ||
      isa<ArrayType>(DepSI->getOperand(0)->getType()))
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(L->getType(), L->getPointerOperand(),
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(LoadInst *L, MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(L->getType(), L->getPointerOperand(),
                                          MI->getDest(), MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset =
    AnalyzeLoadFromClobberingWrite(L->getType(), L->getPointerOperand(),
                                   MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(L->getType()));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}


/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8;
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;


  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (isa<PointerType>(SrcVal->getType()))
    SrcVal = new PtrToIntInst(SrcVal, TD.getIntPtrType(Ctx), "tmp", InsertPt);
  if (!isa<IntegerType>(SrcVal->getType()))
    SrcVal = new BitCastInst(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                             "tmp", InsertPt);
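  // Illustrative sketch (hypothetical values, not from this file): with a
  // 4-byte store and a 1-byte load at Offset 2, the shift below is 16 bits
  // on a little-endian target and 8 bits on a big-endian one.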
  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = BinaryOperator::CreateLShr(SrcVal,
                ConstantInt::get(SrcVal->getType(), ShiftAmt), "tmp", InsertPt);

  if (LoadSize != StoreSize)
    SrcVal = new TruncInst(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                           "tmp", InsertPt);

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}
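// For example (hypothetical call, not from this file): forwarding from
// memset(P, 0xAB, 8) to a fully covered 4-byte load yields the splat
// 0xABABABAB, built by the shl/or doubling loop above.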
struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }
};

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     AliasAnalysis *AA) {
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI);

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    unsigned Offset = AV.Offset;

    Value *AvailableVal;
    if (AV.isSimpleValue()) {
      AvailableVal = AV.getSimpleValue();
      if (AvailableVal->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        AvailableVal = GetStoreValueForLoad(AvailableVal, Offset, LoadTy,
                                            BB->getTerminator(), *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
                     << *AV.getSimpleValue() << '\n'
                     << *AvailableVal << '\n' << "\n\n\n");
      }
    } else {
      AvailableVal = GetMemInstValueForLoad(AV.getMemIntrinValue(), Offset,
                                            LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << " " << *AV.getMemIntrinValue() << '\n'
                   << *AvailableVal << '\n' << "\n\n\n");
    }
    SSAUpdate.AddAvailableValue(BB, AvailableVal);
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (isa<PointerType>(V->getType()))
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}
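// Illustrative sketch (hypothetical IR, not from this file): if the load's
// block has predecessors %A and %B with available values %v1 and %v2,
// ConstructSSAForLoadSet produces
//   %merge = phi i32 [ %v1, %A ], [ %v2, %B ]
// and returns it as the replacement for the load.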
static bool isLifetimeStart(Instruction *Inst) {
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepEntry, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(errs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      errs() << "GVN: non-local load ";
      WriteAsOperand(errs(), LI);
      errs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD) {
          int Offset = AnalyzeLoadFromClobberingStore(LI, DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                           DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI, DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(errs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (isa<PointerType>(V->getType()))
      MD->invalidateCachedPointerInfo(V);
    toErase.push_back(LI);
    NumGVNLoad++;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (!TmpBB) // If we haven't found any, bail now.
      return false;
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;
  // If we have a repl set with LI itself in it, this means we have a loop where
  // at least one of the values is LI.  Since this means that we won't be able
  // to eliminate LI even if we insert uses in the other predecessors, we will
  // end up increasing code size.  Reject this by scanning for LI.
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    if (ValuesPerBlock[i].isSimpleValue() &&
        ValuesPerBlock[i].getSimpleValue() == LI)
      return false;

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions. We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Okay, we have some hope :).  Check to see if the loaded value is fully
  // available in all but one predecessor.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  BasicBlock *UnavailablePred = 0;

  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      continue;

    // If this load is not available in multiple predecessors, reject it.
    if (UnavailablePred && UnavailablePred != *PI)
      return false;
    UnavailablePred = *PI;
  }

  assert(UnavailablePred != 0 &&
         "Fully available value should be eliminated above!");

  // We don't currently handle critical edges :(
  if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
    DEBUG(errs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
                 << UnavailablePred->getName() << "': " << *LI << '\n');
    return false;
  }

  // Do PHI translation to get its value in the predecessor if necessary.  The
  // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
  //
  SmallVector<Instruction*, 8> NewInsts;

  // If all preds have a single successor, then we know it is safe to insert the
  // load on the pred (?!?), so we can insert code to materialize the pointer if
  // it is not available.
  PHITransAddr Address(LI->getOperand(0), TD);
  Value *LoadPtr = 0;
  if (allSingleSucc) {
    LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                *DT, NewInsts);
  } else {
    Address.PHITranslateValue(LoadBB, UnavailablePred);
    LoadPtr = Address.getAddr();

    // Make sure the value is live in the predecessor.
    if (Instruction *Inst = dyn_cast_or_null<Instruction>(LoadPtr))
      if (!DT->dominates(Inst->getParent(), UnavailablePred))
        LoadPtr = 0;
  }
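  // Illustrative sketch (hypothetical IR, not from this file): if the
  // address in LoadBB is %p = phi i8* [ %a, %Pred1 ], [ %b, %Pred2 ] and
  // %Pred1 is the unavailable predecessor, PHI translation rewrites the
  // address to %a, which the reload inserted below can use directly.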
  // If we couldn't find or insert a computation of this phi translated value,
  // we fail PRE.
  if (LoadPtr == 0) {
    assert(NewInsts.empty() && "Shouldn't insert insts on failure");
    DEBUG(errs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                 << *LI->getOperand(0) << "\n");
    return false;
  }

  // Assign value numbers to these new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  // Make sure it is valid to move this load here.  We have to watch out for:
  //  @1 = getelementptr (i8* p, ...
  //  test p and branch if == 0
  //  load @1
  // It is valid to have the getelementptr before the test, even if p can be 0,
  // as getelementptr only does address arithmetic.
  // If we are not pushing the value through any multiple-successor blocks
  // we do not have this case.  Otherwise, check that the load is safe to
  // put anywhere; this can be improved, but should be conservatively safe.
  if (!allSingleSucc &&
      // FIXME: REEVALUATE THIS.
      !isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator())) {
    assert(NewInsts.empty() && "Should not have inserted instructions");
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(errs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          errs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                LI->getAlignment(),
                                UnavailablePred->getTerminator());

  // Add the newly created load.
  ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                      NewLoad));

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (isa<PointerType>(V->getType()))
    MD->invalidateCachedPointerInfo(V);
  toErase.push_back(LI);
  NumPRELoad++;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // Find the earlier memory operation this load depends on: a store or load
  // of the same pointer, a clobber, or a non-local dependence.
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We handle this by recognizing that the clobbering instructions share an
    // obvious common base + constant offset, and that the previous store (or
    // memset) completely covers this load.  This sort of thing can happen in
    // bitfield access code.
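    // For illustration only (hypothetical IR), the memset case looks like:
    //   call void @llvm.memset.i64(i8* %P, i8 0, i64 16, i32 1)
    //   %v = load i32* %Q        ; where %Q is %P plus a constant offset < 16
    // The load is fully covered by the memset, so %v can be replaced by the
    // constant 0 without touching memory.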
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L, DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L, DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(),
                                            L, *TD);
      }
    }

    if (AvailVal) {
      DEBUG(errs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
                   << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (isa<PointerType>(AvailVal->getType()))
        MD->invalidateCachedPointerInfo(AvailVal);
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }

    DEBUG(
      // fast print dep, using operator<< on instruction would be too slow
      errs() << "GVN: load ";
      WriteAsOperand(errs(), L);
      Instruction *I = Dep.getInst();
      errs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(errs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (isa<PointerType>(StoredVal->getType()))
      MD->invalidateCachedPointerInfo(StoredVal);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(errs() << "GVN COERCED LOAD:\n" << *DepLI << "\n"
                     << *AvailableVal << "\n" << *L << "\n\n\n");
      } else
        return false;
    }

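    // For illustration only (hypothetical IR), a wider earlier load can
    // satisfy a narrower one at the same address:
    //   %a = load i64* %P
    //   %b = load i32* %Q        ; where %Q is %P bitcast to i32*
    // Given TargetData, the coercion materializes %b from %a with a trunc
    // (preceded by a shift on big-endian targets).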
    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (isa<PointerType>(DepLI->getType()))
      MD->invalidateCachedPointerInfo(DepLI);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading from a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load occurs right after a lifetime begin intrinsic, then the
  // loaded value is undefined.
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }
  }

  return false;
}

Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator TI = Locals->table.find(num);
    if (TI != Locals->table.end())
      return TI->second;
    Locals = Locals->parent;
  }

  return 0;
}


/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());

    return false;

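    // For illustration only (hypothetical IR) of the propagation above: given
    //   br i1 %cmp, label %T, label %F
    // when %T has a single predecessor, any instruction in %T carrying the
    // same value number as %cmp is now replaceable by 'true' (and by 'false'
    // in %F) via the dominator-scoped lookup in lookupNumber.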
  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }

  // Collapse PHI nodes.
  if (PHINode* p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && isa<PointerType>(constVal->getType()))
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && isa<PointerType>(repl->getType()))
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) NumGVNBlocks++;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(errs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}


bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

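    // Note that BI may point at one of the instructions about to be erased,
    // so the code below backs the iterator up (or remembers being at the
    // block start) before deleting, then steps forward again afterwards.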
    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(errs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look for the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (*PI == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(*PI)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
          localAvail[*PI]->table.find(ValNo);
        if (predV == localAvail[*PI]->table.end()) {
          PREPred = *PI;
          NumWithout++;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[*PI] = predV->second;
          NumWith++;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
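      // For illustration (hypothetical CFG): the edge PREPred->CurrentBlock is
      // critical when PREPred has multiple successors and CurrentBlock has
      // multiple predecessors.  Hoisting the expression into PREPred would
      // then execute it on paths that never reach CurrentBlock; splitting the
      // edge creates a block where the insertion is safe.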
      unsigned SuccNum = 0;
      for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
           i != e; ++i)
        if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
          SuccNum = i;
          break;
        }

      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        delete PREInstr;
        DEBUG(verifyRemoved(PREInstr));
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      NumGVNPRE++;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));

      // Create a PHI to make the value available in this block.
      PHINode* Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI)
        Phi->addIncoming(predMap[*PI], *PI);

      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && isa<PointerType>(Phi->getType()))
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(errs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
       I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
    SplitCriticalEdge(I->first, I->second, this);

  return Changed || !toSplit.empty();
}

/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
        new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }

  // Top-down walk of the dominator tree.
  bool Changed = false;
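  // A reverse-post-order walk (the disabled variant below) would visit every
  // predecessor of a block before the block itself, which phi-construction-
  // based value numbering would need; the dominator-tree DFS used instead
  // only guarantees that dominating definitions are visited first.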
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
           II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;
    }
  }
}