GVN.cpp revision 4bbf4ee1491637c247e195e19e3e4a8ee5ad72fa
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <cstdio>
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
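///
/// Illustrative example (assumed for exposition, not part of the original
/// comments): given
///   %a = add i32 %x, %y
///   %b = add i32 %x, %y
/// both instructions build identical Expressions below and so receive the
/// same value number; that is what lets GVN treat %b as redundant with %a.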
namespace {
  struct Expression {
    enum ExpressionOpcode { ADD, FADD, SUB, FSUB, MUL, FMUL,
                            UDIV, SDIV, FDIV, UREM, SREM,
                            FREM, SHL, LSHR, ASHR, AND, OR, XOR, ICMPEQ,
                            ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
                            ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
                            FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
                            FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
                            FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
                            SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI,
                            FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT,
                            PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, CONSTANT,
                            INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(BinaryOperator* BO);
      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression::ExpressionOpcode getOpcode(CastInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(Constant* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      unsigned size();
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9));

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}
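// A note on getHashValue above (an observation, not from the original
// comments): the initial "hash = e.opcode" seed is overwritten by the very
// next assignment, so the opcode participates only in isEqual, not in the
// hash.  Illustrative (assumed) trace for varargs {3, 7} with type-hash seed
// T: hash = 3 + T*37; hash = 7 + hash*37; then the function pointer's
// shifted-XOR hash is folded in the same way.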
//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//
Expression::ExpressionOpcode ValueTable::getOpcode(BinaryOperator* BO) {
  switch(BO->getOpcode()) {
  default: // THIS SHOULD NEVER HAPPEN
    llvm_unreachable("Binary operator with unknown opcode?");
  case Instruction::Add:  return Expression::ADD;
  case Instruction::FAdd: return Expression::FADD;
  case Instruction::Sub:  return Expression::SUB;
  case Instruction::FSub: return Expression::FSUB;
  case Instruction::Mul:  return Expression::MUL;
  case Instruction::FMul: return Expression::FMUL;
  case Instruction::UDiv: return Expression::UDIV;
  case Instruction::SDiv: return Expression::SDIV;
  case Instruction::FDiv: return Expression::FDIV;
  case Instruction::URem: return Expression::UREM;
  case Instruction::SRem: return Expression::SREM;
  case Instruction::FRem: return Expression::FREM;
  case Instruction::Shl:  return Expression::SHL;
  case Instruction::LShr: return Expression::LSHR;
  case Instruction::AShr: return Expression::ASHR;
  case Instruction::And:  return Expression::AND;
  case Instruction::Or:   return Expression::OR;
  case Instruction::Xor:  return Expression::XOR;
  }
}

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}
Expression::ExpressionOpcode ValueTable::getOpcode(CastInst* C) {
  switch(C->getOpcode()) {
  default: // THIS SHOULD NEVER HAPPEN
    llvm_unreachable("Cast operator with unknown opcode?");
  case Instruction::Trunc:    return Expression::TRUNC;
  case Instruction::ZExt:     return Expression::ZEXT;
  case Instruction::SExt:     return Expression::SEXT;
  case Instruction::FPToUI:   return Expression::FPTOUI;
  case Instruction::FPToSI:   return Expression::FPTOSI;
  case Instruction::UIToFP:   return Expression::UITOFP;
  case Instruction::SIToFP:   return Expression::SITOFP;
  case Instruction::FPTrunc:  return Expression::FPTRUNC;
  case Instruction::FPExt:    return Expression::FPEXT;
  case Instruction::PtrToInt: return Expression::PTRTOINT;
  case Instruction::IntToPtr: return Expression::INTTOPTR;
  case Instruction::BitCast:  return Expression::BITCAST;
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = getOpcode(BO);

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}
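// Illustrative (assumed) example of the encoding the create_expression
// overloads build: for
//   %s = select i1 %c, i32 %t, i32 %f
// the result is Expression{opcode = SELECT, type = i32,
// varargs = [VN(%c), VN(%t), VN(%f)], function = 0}, where VN(x) denotes the
// operand's value number obtained from lookup_or_add.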
Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumOperands() != C->getNumOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 1; i < C->getNumOperands(); ++i) {
        uint32_t c_vn = lookup_or_add(C->getOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
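    // Illustrative (assumed) example of the situation handled below: for a
    // readonly call such as
    //   %r1 = call i32 @strlen(i8* %s)   ; in a dominating block
    //   %r2 = call i32 @strlen(i8* %s)   ; the call being numbered
    // the loop tries to prove that %r1 is the single dominating identical
    // call, so that %r2 can reuse its value number.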
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumOperands() != C->getNumOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 1; i < C->getNumOperands(); ++i) {
      uint32_t c_vn = lookup_or_add(C->getOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or :
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool nopre = false, bool noloads = false)
      : FunctionPass(&ID), NoPRE(nopre), NoLoads(noloads), MD(0) { }

  private:
    bool NoPRE;
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
  };

  char GVN::ID = 0;
}
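// A GVN object is created programmatically via createGVNPass() below, or
// selected on the command line through the RegisterPass entry that follows
// it, e.g. (illustrative) "opt -gvn input.bc -o output.bc".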
// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoPRE, bool NoLoads) {
  return new GVN(NoPRE, NoLoads);
}

static RegisterPass<GVN> X("gvn",
                           "Global Value Numbering");

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  printf("{\n");
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    printf("%d\n", I->first);
    I->second->dump();
  }
  printf("}\n");
}

static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}
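// Illustrative (assumed) example of the collapse above:
//   %p = phi i32 [ %v, %bb1 ], [ %v, %bb2 ]
// has %v as its "constant value"; if %v is an instruction that dominates %p
// (and no phi in %p's block would be broken, per isSafeReplacement), %p
// collapses to %v.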
/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully available in FullyAvailableBlocks.
/// This map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  while (!BBWorklist.empty()) {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  }

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy) ||
      isa<StructType>(StoredVal->getType()) ||
      isa<ArrayType>(StoredVal->getType()))
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}


/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (isa<PointerType>(StoredValTy) && isa<PointerType>(LoadedTy)) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (isa<PointerType>(StoredValTy)) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (isa<PointerType>(TypeToCastTo))
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (isa<PointerType>(LoadedTy))
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
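  // Illustrative (assumed) example of this narrowing path: reusing an i64
  // store to satisfy an i32 load of the same address becomes, on a
  // little-endian target,
  //   %val = trunc i64 %stored to i32
  // while a big-endian target first shifts the wide value right (see below).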
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (isa<PointerType>(StoredValTy)) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!isa<IntegerType>(StoredValTy)) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (isa<PointerType>(LoadedTy))
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}
/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy))
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.
  if (LoadOffset == StoreOffset) {
#if 0
    errs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset) {
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  } else {
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
  }
  if (isAAFailure) {
#if 0
    errs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
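// Illustrative (assumed) numbers for the analysis above: a store of an i64 to
// a base pointer P covers bytes [0,8); an i32 load from P+4 covers bytes
// [4,8).  The load is fully contained in the store, so the function returns
// LoadOffset - StoreOffset = 4, the byte offset of the load within the stored
// value.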
/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (isa<StructType>(DepSI->getOperand(0)->getType()) ||
      isa<ArrayType>(DepSI->getOperand(0)->getType()))
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}


/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8;
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (isa<PointerType>(SrcVal->getType()))
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!isa<IntegerType>(SrcVal->getType()))
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
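  // Illustrative (assumed) arithmetic for the shift below: with StoreSize = 8
  // bytes, LoadSize = 4 bytes and Offset = 4, a little-endian target shifts
  // right by 4*8 = 32 bits, while a big-endian target shifts by
  // (8-4-4)*8 = 0 bits before truncating to 32 bits.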
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}
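// Illustrative (assumed) trace of the memset splat loop above, for a one-byte
// value %x feeding a 4-byte load: Val starts as zext(%x) and is doubled twice,
//   Val = Val | (Val << 8)     ; 2 bytes set
//   Val = Val | (Val << 16)    ; 4 bytes set
// producing four copies of the memset byte.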
struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }
};

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     AliasAnalysis *AA) {
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI);

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    unsigned Offset = AV.Offset;

    Value *AvailableVal;
    if (AV.isSimpleValue()) {
      AvailableVal = AV.getSimpleValue();
      if (AvailableVal->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        AvailableVal = GetStoreValueForLoad(AvailableVal, Offset, LoadTy,
                                            BB->getTerminator(), *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *AV.getSimpleValue() << '\n'
                     << *AvailableVal << '\n' << "\n\n\n");
      }
    } else {
      AvailableVal = GetMemInstValueForLoad(AV.getMemIntrinValue(), Offset,
                                            LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *AV.getMemIntrinValue() << '\n'
                   << *AvailableVal << '\n' << "\n\n\n");
    }
    SSAUpdate.AddAvailableValue(BB, AvailableVal);
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (isa<PointerType>(V->getType()))
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}

static bool isLifetimeStart(Instruction *Inst) {
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepEntry, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(errs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      errs() << "GVN: non-local load ";
      WriteAsOperand(errs(), LI);
      errs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same
      // as the pointer operand of the load if PHI translation occurs.  Make
      // sure to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }
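    // Illustrative (assumed) example of the undef case above:
    //   %p = alloca i32
    //   %v = load i32* %p    ; nothing has been stored yet -> %v is undef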
    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the
      // load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(errs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (isa<PointerType>(V->getType()))
      MD->invalidateCachedPointerInfo(V);
    toErase.push_back(LI);
    NumGVNLoad++;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (!TmpBB) // If haven't found any, bail now.
      return false;
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // If we have a repl set with LI itself in it, this means we have a loop where
  // at least one of the values is LI.  Since this means that we won't be able
  // to eliminate LI even if we insert uses in the other predecessors, we will
  // end up increasing code size.  Reject this by scanning for LI.
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    if (ValuesPerBlock[i].isSimpleValue() &&
        ValuesPerBlock[i].getSimpleValue() == LI)
      return false;

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions. We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Okay, we have some hope :).  Check to see if the loaded value is fully
  // available in all but one predecessor.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  BasicBlock *UnavailablePred = 0;

  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      continue;

    // If this load is not available in multiple predecessors, reject it.
    if (UnavailablePred && UnavailablePred != *PI)
      return false;
    UnavailablePred = *PI;
  }

  assert(UnavailablePred != 0 &&
         "Fully available value should be eliminated above!");

  // We don't currently handle critical edges :(
  if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
    DEBUG(errs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
                 << UnavailablePred->getName() << "': " << *LI << '\n');
    return false;
  }

  // Do PHI translation to get its value in the predecessor if necessary.  The
  // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
  //
  SmallVector<Instruction*, 8> NewInsts;

  // If all preds have a single successor, then we know it is safe to insert
  // the load on the pred (?!?), so we can insert code to materialize the
  // pointer if it is not available.
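  // Illustrative (assumed) example of the PHI translation performed below: if
  // the load address in LoadBB is
  //   %a = phi i8* [ %x, %pred1 ], [ %y, %pred2 ]
  //   %p = getelementptr i8* %a, i32 1
  // then in %pred1 the address translates to "getelementptr i8* %x, i32 1",
  // which may have to be inserted there (recorded in NewInsts).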
  PHITransAddr Address(LI->getOperand(0), TD);
  Value *LoadPtr = 0;
  if (allSingleSucc) {
    LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                *DT, NewInsts);
  } else {
    Address.PHITranslateValue(LoadBB, UnavailablePred);
    LoadPtr = Address.getAddr();

    // Make sure the value is live in the predecessor.
    if (Instruction *Inst = dyn_cast_or_null<Instruction>(LoadPtr))
      if (!DT->dominates(Inst->getParent(), UnavailablePred))
        LoadPtr = 0;
  }

  // If we couldn't find or insert a computation of this phi translated value,
  // we fail PRE.
  if (LoadPtr == 0) {
    assert(NewInsts.empty() && "Shouldn't insert insts on failure");
    DEBUG(errs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                 << *LI->getOperand(0) << "\n");
    return false;
  }

  // Assign value numbers to these new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map. However, in doing so, we risk getting into
    // ordering issues. If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  // Make sure it is valid to move this load here. We have to watch out for:
  //   @1 = getelementptr (i8* p, ...
  //   test p and branch if == 0
  //   load @1
  // It is valid to have the getelementptr before the test, even if p can be 0,
  // as getelementptr only does address arithmetic.
  // If we are not pushing the value through any multiple-successor blocks,
  // we do not have this case. Otherwise, check that the load is safe to
  // put anywhere; this can be improved, but should be conservatively safe.
  if (!allSingleSucc &&
      // FIXME: REEVALUATE THIS.
      !isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator())) {
    assert(NewInsts.empty() && "Should not have inserted instructions");
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors.
  // Do it.
  DEBUG(errs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          errs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                LI->getAlignment(),
                                UnavailablePred->getTerminator());

  // Add the newly created load.
  ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                      NewLoad));

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (isa<PointerType>(V->getType()))
    MD->invalidateCachedPointerInfo(V);
  toErase.push_back(LI);
  NumPRELoad++;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // Find the closest instruction this load depends on.
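  // A local dependency comes back in one of three forms: a def (e.g. a store
  // or load of the same pointer), a clobber (a write that may alias the
  // pointer), or "non-local" when nothing in this block is relevant.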
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We handle this by recognizing when the clobbering instruction and the
    // load share an obvious common base plus constant offset, and the
    // previous store (or memset) completely covers this load. This sort of
    // thing can happen in bitfield access code.
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(),
                                            L, *TD);
      }
    }

    if (AvailVal) {
      DEBUG(errs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
                   << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (isa<PointerType>(AvailVal->getType()))
        MD->invalidateCachedPointerInfo(AvailVal);
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }

    DEBUG(
      // Fast-print the dep; using operator<< on the instruction would be too
      // slow.
      errs() << "GVN: load ";
      WriteAsOperand(errs(), L);
      Instruction *I = Dep.getInst();
      errs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type. See if we know how to reuse the stored
    // value (depending on its type).
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(errs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else {
        return false;
      }
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (isa<PointerType>(StoredVal->getType()))
      MD->invalidateCachedPointerInfo(StoredVal);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type. See if we know how to reuse the previously loaded value
    // (depending on its type).
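    // For example (illustrative):
    //   %v = load i32* %P
    //   %Q = bitcast i32* %P to float*
    //   %f = load float* %Q
    // The second load can reuse %v after an i32-to-float coercion, since the
    // bits in memory are the same.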
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(errs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else {
        return false;
      }
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (isa<PointerType>(AvailableVal->getType()))
      MD->invalidateCachedPointerInfo(AvailableVal);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value. This can happen when loading from a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load occurs right after a lifetime.start intrinsic, the loaded
  // value is undefined.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }
  }

  return false;
}

Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
    if (I != Locals->table.end())
      return I->second;
    Locals = Locals->parent;
  }

  return 0;
}


/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    // In a successor reached only over this branch, the condition's value
    // number is known to be the matching constant.
    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());

    return false;

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
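  // (Two distinct allocations never share a value number, and terminators
  // produce no result worth reusing, so a dominator-scope lookup could never
  // find a match for them.)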
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }

  // Collapse PHI nodes.
  if (PHINode *p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && isa<PointerType>(constVal->getType()))
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && isa<PointerType>(repl->getType()))
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function &F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) NumGVNBlocks++;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(errs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something. PRE can move
  // computations into blocks where they become fully redundant. Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}


bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we marked some instructions for deletion, delete them now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
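    // (Erasing the instruction BI points at would invalidate BI, so step back
    // one instruction first, erase, and then re-advance.)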
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(errs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look at the predecessors for PRE opportunities. We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other. We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (*PI == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(*PI)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
          localAvail[*PI]->table.find(ValNo);
        if (predV == localAvail[*PI]->table.end()) {
          PREPred = *PI;
          NumWithout++;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[*PI] = predV->second;
          NumWith++;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
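      // (A critical edge runs from a block with several successors to a block
      // with several predecessors; code inserted on such an edge would also
      // execute on paths that bypass CurrentBlock. Splitting the edge gives
      // PRE a safe insertion point on the next iteration.)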
      unsigned SuccNum = 0;
      for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
           i != e; ++i)
        if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
          SuccNum = i;
          break;
        }

      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them. Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor. This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      NumGVNPRE++;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));

      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI)
        Phi->addIncoming(predMap[*PI], *PI);

      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && isa<PointerType>(Phi->getType()))
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(errs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
       I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
    SplitCriticalEdge(I->first, I->second, this);

  return Changed || !toSplit.empty();
}

/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
        new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
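  // (In an acyclic CFG, a reverse-post-order walk visits every predecessor of
  // a block before the block itself, so values numbered in predecessors would
  // be available when their join-point PHIs are numbered; the dominator-tree
  // walk below does not guarantee that for non-dominating predecessors.)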
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scopes to make sure the instruction isn't
  // ferreted away in one of them.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
           II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;
    }
  }
}