GVN.cpp revision 6f7b210b2577fbc9247a9fc5223655390008ae89
1//===- GVN.cpp - Eliminate redundant values and loads ---------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This pass performs global value numbering to eliminate fully redundant 11// instructions. It also performs simple dead load elimination. 12// 13// Note that this pass does the value numbering itself; it does not use the 14// ValueNumbering analysis passes. 15// 16//===----------------------------------------------------------------------===// 17 18#define DEBUG_TYPE "gvn" 19#include "llvm/Transforms/Scalar.h" 20#include "llvm/BasicBlock.h" 21#include "llvm/Constants.h" 22#include "llvm/DerivedTypes.h" 23#include "llvm/Function.h" 24#include "llvm/IntrinsicInst.h" 25#include "llvm/LLVMContext.h" 26#include "llvm/Operator.h" 27#include "llvm/Value.h" 28#include "llvm/ADT/DenseMap.h" 29#include "llvm/ADT/DepthFirstIterator.h" 30#include "llvm/ADT/PostOrderIterator.h" 31#include "llvm/ADT/SmallPtrSet.h" 32#include "llvm/ADT/SmallVector.h" 33#include "llvm/ADT/Statistic.h" 34#include "llvm/Analysis/Dominators.h" 35#include "llvm/Analysis/AliasAnalysis.h" 36#include "llvm/Analysis/MemoryBuiltins.h" 37#include "llvm/Analysis/MemoryDependenceAnalysis.h" 38#include "llvm/Support/CFG.h" 39#include "llvm/Support/CommandLine.h" 40#include "llvm/Support/Debug.h" 41#include "llvm/Support/ErrorHandling.h" 42#include "llvm/Support/GetElementPtrTypeIterator.h" 43#include "llvm/Support/raw_ostream.h" 44#include "llvm/Target/TargetData.h" 45#include "llvm/Transforms/Utils/BasicBlockUtils.h" 46#include "llvm/Transforms/Utils/Local.h" 47#include "llvm/Transforms/Utils/SSAUpdater.h" 48#include <cstdio> 49using namespace llvm; 50 51STATISTIC(NumGVNInstr, "Number of instructions deleted"); 52STATISTIC(NumGVNLoad, "Number of loads deleted"); 53STATISTIC(NumGVNPRE, "Number of instructions PRE'd"); 54STATISTIC(NumGVNBlocks, "Number of blocks merged"); 55STATISTIC(NumPRELoad, "Number of loads PRE'd"); 56 57static cl::opt<bool> EnablePRE("enable-pre", 58 cl::init(true), cl::Hidden); 59static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true)); 60 61//===----------------------------------------------------------------------===// 62// ValueTable Class 63//===----------------------------------------------------------------------===// 64 65/// This class holds the mapping between values and value numbers. It is used 66/// as an efficient mechanism to determine the expression-wise equivalence of 67/// two values. 68namespace { 69 struct Expression { 70 enum ExpressionOpcode { ADD, FADD, SUB, FSUB, MUL, FMUL, 71 UDIV, SDIV, FDIV, UREM, SREM, 72 FREM, SHL, LSHR, ASHR, AND, OR, XOR, ICMPEQ, 73 ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE, 74 ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ, 75 FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE, 76 FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE, 77 FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT, 78 SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI, 79 FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT, 80 PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, CONSTANT, 81 INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE }; 82 83 ExpressionOpcode opcode; 84 const Type* type; 85 SmallVector<uint32_t, 4> varargs; 86 Value *function; 87 88 Expression() { } 89 Expression(ExpressionOpcode o) : opcode(o) { } 90 91 bool operator==(const Expression &other) const { 92 if (opcode != other.opcode) 93 return false; 94 else if (opcode == EMPTY || opcode == TOMBSTONE) 95 return true; 96 else if (type != other.type) 97 return false; 98 else if (function != other.function) 99 return false; 100 else { 101 if (varargs.size() != other.varargs.size()) 102 return false; 103 104 for (size_t i = 0; i < varargs.size(); ++i) 105 if (varargs[i] != other.varargs[i]) 106 return false; 107 108 return true; 109 } 110 } 111 112 bool operator!=(const Expression &other) const { 113 return !(*this == other); 114 } 115 }; 116 117 class ValueTable { 118 private: 119 DenseMap<Value*, uint32_t> valueNumbering; 120 DenseMap<Expression, uint32_t> expressionNumbering; 121 AliasAnalysis* AA; 122 MemoryDependenceAnalysis* MD; 123 DominatorTree* DT; 124 125 uint32_t nextValueNumber; 126 127 Expression::ExpressionOpcode getOpcode(BinaryOperator* BO); 128 Expression::ExpressionOpcode getOpcode(CmpInst* C); 129 Expression::ExpressionOpcode getOpcode(CastInst* C); 130 Expression create_expression(BinaryOperator* BO); 131 Expression create_expression(CmpInst* C); 132 Expression create_expression(ShuffleVectorInst* V); 133 Expression create_expression(ExtractElementInst* C); 134 Expression create_expression(InsertElementInst* V); 135 Expression create_expression(SelectInst* V); 136 Expression create_expression(CastInst* C); 137 Expression create_expression(GetElementPtrInst* G); 138 Expression create_expression(CallInst* C); 139 Expression create_expression(Constant* C); 140 Expression create_expression(ExtractValueInst* C); 141 Expression create_expression(InsertValueInst* C); 142 143 uint32_t lookup_or_add_call(CallInst* C); 144 public: 145 ValueTable() : nextValueNumber(1) { } 146 uint32_t lookup_or_add(Value *V); 147 uint32_t lookup(Value *V) const; 148 void add(Value *V, uint32_t num); 149 void clear(); 150 void erase(Value *v); 151 unsigned size(); 152 void setAliasAnalysis(AliasAnalysis* A) { AA = A; } 153 AliasAnalysis *getAliasAnalysis() const { return AA; } 154 void setMemDep(MemoryDependenceAnalysis* M) { MD = M; } 155 void setDomTree(DominatorTree* D) { DT = D; } 156 uint32_t getNextUnusedValueNumber() { return nextValueNumber; } 157 void verifyRemoved(const Value *) const; 158 }; 159} 160 161namespace llvm { 162template <> struct DenseMapInfo<Expression> { 163 static inline Expression getEmptyKey() { 164 return Expression(Expression::EMPTY); 165 } 166 167 static inline Expression getTombstoneKey() { 168 return Expression(Expression::TOMBSTONE); 169 } 170 171 static unsigned getHashValue(const Expression e) { 172 unsigned hash = e.opcode; 173 174 hash = ((unsigned)((uintptr_t)e.type >> 4) ^ 175 (unsigned)((uintptr_t)e.type >> 9)); 176 177 for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(), 178 E = e.varargs.end(); I != E; ++I) 179 hash = *I + hash * 37; 180 181 hash = ((unsigned)((uintptr_t)e.function >> 4) ^ 182 (unsigned)((uintptr_t)e.function >> 9)) + 183 hash * 37; 184 185 return hash; 186 } 187 static bool isEqual(const Expression &LHS, const Expression &RHS) { 188 return LHS == RHS; 189 } 190 static bool isPod() { return true; } 191}; 192} 193 194//===----------------------------------------------------------------------===// 195// ValueTable Internal Functions 196//===----------------------------------------------------------------------===// 197Expression::ExpressionOpcode ValueTable::getOpcode(BinaryOperator* BO) { 198 switch(BO->getOpcode()) { 199 default: // THIS SHOULD NEVER HAPPEN 200 llvm_unreachable("Binary operator with unknown opcode?"); 201 case Instruction::Add: return Expression::ADD; 202 case Instruction::FAdd: return Expression::FADD; 203 case Instruction::Sub: return Expression::SUB; 204 case Instruction::FSub: return Expression::FSUB; 205 case Instruction::Mul: return Expression::MUL; 206 case Instruction::FMul: return Expression::FMUL; 207 case Instruction::UDiv: return Expression::UDIV; 208 case Instruction::SDiv: return Expression::SDIV; 209 case Instruction::FDiv: return Expression::FDIV; 210 case Instruction::URem: return Expression::UREM; 211 case Instruction::SRem: return Expression::SREM; 212 case Instruction::FRem: return Expression::FREM; 213 case Instruction::Shl: return Expression::SHL; 214 case Instruction::LShr: return Expression::LSHR; 215 case Instruction::AShr: return Expression::ASHR; 216 case Instruction::And: return Expression::AND; 217 case Instruction::Or: return Expression::OR; 218 case Instruction::Xor: return Expression::XOR; 219 } 220} 221 222Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) { 223 if (isa<ICmpInst>(C)) { 224 switch (C->getPredicate()) { 225 default: // THIS SHOULD NEVER HAPPEN 226 llvm_unreachable("Comparison with unknown predicate?"); 227 case ICmpInst::ICMP_EQ: return Expression::ICMPEQ; 228 case ICmpInst::ICMP_NE: return Expression::ICMPNE; 229 case ICmpInst::ICMP_UGT: return Expression::ICMPUGT; 230 case ICmpInst::ICMP_UGE: return Expression::ICMPUGE; 231 case ICmpInst::ICMP_ULT: return Expression::ICMPULT; 232 case ICmpInst::ICMP_ULE: return Expression::ICMPULE; 233 case ICmpInst::ICMP_SGT: return Expression::ICMPSGT; 234 case ICmpInst::ICMP_SGE: return Expression::ICMPSGE; 235 case ICmpInst::ICMP_SLT: return Expression::ICMPSLT; 236 case ICmpInst::ICMP_SLE: return Expression::ICMPSLE; 237 } 238 } else { 239 switch (C->getPredicate()) { 240 default: // THIS SHOULD NEVER HAPPEN 241 llvm_unreachable("Comparison with unknown predicate?"); 242 case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ; 243 case FCmpInst::FCMP_OGT: return Expression::FCMPOGT; 244 case FCmpInst::FCMP_OGE: return Expression::FCMPOGE; 245 case FCmpInst::FCMP_OLT: return Expression::FCMPOLT; 246 case FCmpInst::FCMP_OLE: return Expression::FCMPOLE; 247 case FCmpInst::FCMP_ONE: return Expression::FCMPONE; 248 case FCmpInst::FCMP_ORD: return Expression::FCMPORD; 249 case FCmpInst::FCMP_UNO: return Expression::FCMPUNO; 250 case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ; 251 case FCmpInst::FCMP_UGT: return Expression::FCMPUGT; 252 case FCmpInst::FCMP_UGE: return Expression::FCMPUGE; 253 case FCmpInst::FCMP_ULT: return Expression::FCMPULT; 254 case FCmpInst::FCMP_ULE: return Expression::FCMPULE; 255 case FCmpInst::FCMP_UNE: return Expression::FCMPUNE; 256 } 257 } 258} 259 260Expression::ExpressionOpcode ValueTable::getOpcode(CastInst* C) { 261 switch(C->getOpcode()) { 262 default: // THIS SHOULD NEVER HAPPEN 263 llvm_unreachable("Cast operator with unknown opcode?"); 264 case Instruction::Trunc: return Expression::TRUNC; 265 case Instruction::ZExt: return Expression::ZEXT; 266 case Instruction::SExt: return Expression::SEXT; 267 case Instruction::FPToUI: return Expression::FPTOUI; 268 case Instruction::FPToSI: return Expression::FPTOSI; 269 case Instruction::UIToFP: return Expression::UITOFP; 270 case Instruction::SIToFP: return Expression::SITOFP; 271 case Instruction::FPTrunc: return Expression::FPTRUNC; 272 case Instruction::FPExt: return Expression::FPEXT; 273 case Instruction::PtrToInt: return Expression::PTRTOINT; 274 case Instruction::IntToPtr: return Expression::INTTOPTR; 275 case Instruction::BitCast: return Expression::BITCAST; 276 } 277} 278 279Expression ValueTable::create_expression(CallInst* C) { 280 Expression e; 281 282 e.type = C->getType(); 283 e.function = C->getCalledFunction(); 284 e.opcode = Expression::CALL; 285 286 for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end(); 287 I != E; ++I) 288 e.varargs.push_back(lookup_or_add(*I)); 289 290 return e; 291} 292 293Expression ValueTable::create_expression(BinaryOperator* BO) { 294 Expression e; 295 e.varargs.push_back(lookup_or_add(BO->getOperand(0))); 296 e.varargs.push_back(lookup_or_add(BO->getOperand(1))); 297 e.function = 0; 298 e.type = BO->getType(); 299 e.opcode = getOpcode(BO); 300 301 return e; 302} 303 304Expression ValueTable::create_expression(CmpInst* C) { 305 Expression e; 306 307 e.varargs.push_back(lookup_or_add(C->getOperand(0))); 308 e.varargs.push_back(lookup_or_add(C->getOperand(1))); 309 e.function = 0; 310 e.type = C->getType(); 311 e.opcode = getOpcode(C); 312 313 return e; 314} 315 316Expression ValueTable::create_expression(CastInst* C) { 317 Expression e; 318 319 e.varargs.push_back(lookup_or_add(C->getOperand(0))); 320 e.function = 0; 321 e.type = C->getType(); 322 e.opcode = getOpcode(C); 323 324 return e; 325} 326 327Expression ValueTable::create_expression(ShuffleVectorInst* S) { 328 Expression e; 329 330 e.varargs.push_back(lookup_or_add(S->getOperand(0))); 331 e.varargs.push_back(lookup_or_add(S->getOperand(1))); 332 e.varargs.push_back(lookup_or_add(S->getOperand(2))); 333 e.function = 0; 334 e.type = S->getType(); 335 e.opcode = Expression::SHUFFLE; 336 337 return e; 338} 339 340Expression ValueTable::create_expression(ExtractElementInst* E) { 341 Expression e; 342 343 e.varargs.push_back(lookup_or_add(E->getOperand(0))); 344 e.varargs.push_back(lookup_or_add(E->getOperand(1))); 345 e.function = 0; 346 e.type = E->getType(); 347 e.opcode = Expression::EXTRACT; 348 349 return e; 350} 351 352Expression ValueTable::create_expression(InsertElementInst* I) { 353 Expression e; 354 355 e.varargs.push_back(lookup_or_add(I->getOperand(0))); 356 e.varargs.push_back(lookup_or_add(I->getOperand(1))); 357 e.varargs.push_back(lookup_or_add(I->getOperand(2))); 358 e.function = 0; 359 e.type = I->getType(); 360 e.opcode = Expression::INSERT; 361 362 return e; 363} 364 365Expression ValueTable::create_expression(SelectInst* I) { 366 Expression e; 367 368 e.varargs.push_back(lookup_or_add(I->getCondition())); 369 e.varargs.push_back(lookup_or_add(I->getTrueValue())); 370 e.varargs.push_back(lookup_or_add(I->getFalseValue())); 371 e.function = 0; 372 e.type = I->getType(); 373 e.opcode = Expression::SELECT; 374 375 return e; 376} 377 378Expression ValueTable::create_expression(GetElementPtrInst* G) { 379 Expression e; 380 381 e.varargs.push_back(lookup_or_add(G->getPointerOperand())); 382 e.function = 0; 383 e.type = G->getType(); 384 e.opcode = Expression::GEP; 385 386 for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end(); 387 I != E; ++I) 388 e.varargs.push_back(lookup_or_add(*I)); 389 390 return e; 391} 392 393Expression ValueTable::create_expression(ExtractValueInst* E) { 394 Expression e; 395 396 e.varargs.push_back(lookup_or_add(E->getAggregateOperand())); 397 for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end(); 398 II != IE; ++II) 399 e.varargs.push_back(*II); 400 e.function = 0; 401 e.type = E->getType(); 402 e.opcode = Expression::EXTRACTVALUE; 403 404 return e; 405} 406 407Expression ValueTable::create_expression(InsertValueInst* E) { 408 Expression e; 409 410 e.varargs.push_back(lookup_or_add(E->getAggregateOperand())); 411 e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand())); 412 for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end(); 413 II != IE; ++II) 414 e.varargs.push_back(*II); 415 e.function = 0; 416 e.type = E->getType(); 417 e.opcode = Expression::INSERTVALUE; 418 419 return e; 420} 421 422//===----------------------------------------------------------------------===// 423// ValueTable External Functions 424//===----------------------------------------------------------------------===// 425 426/// add - Insert a value into the table with a specified value number. 427void ValueTable::add(Value *V, uint32_t num) { 428 valueNumbering.insert(std::make_pair(V, num)); 429} 430 431uint32_t ValueTable::lookup_or_add_call(CallInst* C) { 432 if (AA->doesNotAccessMemory(C)) { 433 Expression exp = create_expression(C); 434 uint32_t& e = expressionNumbering[exp]; 435 if (!e) e = nextValueNumber++; 436 valueNumbering[C] = e; 437 return e; 438 } else if (AA->onlyReadsMemory(C)) { 439 Expression exp = create_expression(C); 440 uint32_t& e = expressionNumbering[exp]; 441 if (!e) { 442 e = nextValueNumber++; 443 valueNumbering[C] = e; 444 return e; 445 } 446 if (!MD) { 447 e = nextValueNumber++; 448 valueNumbering[C] = e; 449 return e; 450 } 451 452 MemDepResult local_dep = MD->getDependency(C); 453 454 if (!local_dep.isDef() && !local_dep.isNonLocal()) { 455 valueNumbering[C] = nextValueNumber; 456 return nextValueNumber++; 457 } 458 459 if (local_dep.isDef()) { 460 CallInst* local_cdep = cast<CallInst>(local_dep.getInst()); 461 462 if (local_cdep->getNumOperands() != C->getNumOperands()) { 463 valueNumbering[C] = nextValueNumber; 464 return nextValueNumber++; 465 } 466 467 for (unsigned i = 1; i < C->getNumOperands(); ++i) { 468 uint32_t c_vn = lookup_or_add(C->getOperand(i)); 469 uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i)); 470 if (c_vn != cd_vn) { 471 valueNumbering[C] = nextValueNumber; 472 return nextValueNumber++; 473 } 474 } 475 476 uint32_t v = lookup_or_add(local_cdep); 477 valueNumbering[C] = v; 478 return v; 479 } 480 481 // Non-local case. 482 const MemoryDependenceAnalysis::NonLocalDepInfo &deps = 483 MD->getNonLocalCallDependency(CallSite(C)); 484 // FIXME: call/call dependencies for readonly calls should return def, not 485 // clobber! Move the checking logic to MemDep! 486 CallInst* cdep = 0; 487 488 // Check to see if we have a single dominating call instruction that is 489 // identical to C. 490 for (unsigned i = 0, e = deps.size(); i != e; ++i) { 491 const MemoryDependenceAnalysis::NonLocalDepEntry *I = &deps[i]; 492 // Ignore non-local dependencies. 493 if (I->second.isNonLocal()) 494 continue; 495 496 // We don't handle non-depedencies. If we already have a call, reject 497 // instruction dependencies. 498 if (I->second.isClobber() || cdep != 0) { 499 cdep = 0; 500 break; 501 } 502 503 CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->second.getInst()); 504 // FIXME: All duplicated with non-local case. 505 if (NonLocalDepCall && DT->properlyDominates(I->first, C->getParent())){ 506 cdep = NonLocalDepCall; 507 continue; 508 } 509 510 cdep = 0; 511 break; 512 } 513 514 if (!cdep) { 515 valueNumbering[C] = nextValueNumber; 516 return nextValueNumber++; 517 } 518 519 if (cdep->getNumOperands() != C->getNumOperands()) { 520 valueNumbering[C] = nextValueNumber; 521 return nextValueNumber++; 522 } 523 for (unsigned i = 1; i < C->getNumOperands(); ++i) { 524 uint32_t c_vn = lookup_or_add(C->getOperand(i)); 525 uint32_t cd_vn = lookup_or_add(cdep->getOperand(i)); 526 if (c_vn != cd_vn) { 527 valueNumbering[C] = nextValueNumber; 528 return nextValueNumber++; 529 } 530 } 531 532 uint32_t v = lookup_or_add(cdep); 533 valueNumbering[C] = v; 534 return v; 535 536 } else { 537 valueNumbering[C] = nextValueNumber; 538 return nextValueNumber++; 539 } 540} 541 542/// lookup_or_add - Returns the value number for the specified value, assigning 543/// it a new number if it did not have one before. 544uint32_t ValueTable::lookup_or_add(Value *V) { 545 DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V); 546 if (VI != valueNumbering.end()) 547 return VI->second; 548 549 if (!isa<Instruction>(V)) { 550 valueNumbering[V] = nextValueNumber; 551 return nextValueNumber++; 552 } 553 554 Instruction* I = cast<Instruction>(V); 555 Expression exp; 556 switch (I->getOpcode()) { 557 case Instruction::Call: 558 return lookup_or_add_call(cast<CallInst>(I)); 559 case Instruction::Add: 560 case Instruction::FAdd: 561 case Instruction::Sub: 562 case Instruction::FSub: 563 case Instruction::Mul: 564 case Instruction::FMul: 565 case Instruction::UDiv: 566 case Instruction::SDiv: 567 case Instruction::FDiv: 568 case Instruction::URem: 569 case Instruction::SRem: 570 case Instruction::FRem: 571 case Instruction::Shl: 572 case Instruction::LShr: 573 case Instruction::AShr: 574 case Instruction::And: 575 case Instruction::Or : 576 case Instruction::Xor: 577 exp = create_expression(cast<BinaryOperator>(I)); 578 break; 579 case Instruction::ICmp: 580 case Instruction::FCmp: 581 exp = create_expression(cast<CmpInst>(I)); 582 break; 583 case Instruction::Trunc: 584 case Instruction::ZExt: 585 case Instruction::SExt: 586 case Instruction::FPToUI: 587 case Instruction::FPToSI: 588 case Instruction::UIToFP: 589 case Instruction::SIToFP: 590 case Instruction::FPTrunc: 591 case Instruction::FPExt: 592 case Instruction::PtrToInt: 593 case Instruction::IntToPtr: 594 case Instruction::BitCast: 595 exp = create_expression(cast<CastInst>(I)); 596 break; 597 case Instruction::Select: 598 exp = create_expression(cast<SelectInst>(I)); 599 break; 600 case Instruction::ExtractElement: 601 exp = create_expression(cast<ExtractElementInst>(I)); 602 break; 603 case Instruction::InsertElement: 604 exp = create_expression(cast<InsertElementInst>(I)); 605 break; 606 case Instruction::ShuffleVector: 607 exp = create_expression(cast<ShuffleVectorInst>(I)); 608 break; 609 case Instruction::ExtractValue: 610 exp = create_expression(cast<ExtractValueInst>(I)); 611 break; 612 case Instruction::InsertValue: 613 exp = create_expression(cast<InsertValueInst>(I)); 614 break; 615 case Instruction::GetElementPtr: 616 exp = create_expression(cast<GetElementPtrInst>(I)); 617 break; 618 default: 619 valueNumbering[V] = nextValueNumber; 620 return nextValueNumber++; 621 } 622 623 uint32_t& e = expressionNumbering[exp]; 624 if (!e) e = nextValueNumber++; 625 valueNumbering[V] = e; 626 return e; 627} 628 629/// lookup - Returns the value number of the specified value. Fails if 630/// the value has not yet been numbered. 631uint32_t ValueTable::lookup(Value *V) const { 632 DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V); 633 assert(VI != valueNumbering.end() && "Value not numbered?"); 634 return VI->second; 635} 636 637/// clear - Remove all entries from the ValueTable 638void ValueTable::clear() { 639 valueNumbering.clear(); 640 expressionNumbering.clear(); 641 nextValueNumber = 1; 642} 643 644/// erase - Remove a value from the value numbering 645void ValueTable::erase(Value *V) { 646 valueNumbering.erase(V); 647} 648 649/// verifyRemoved - Verify that the value is removed from all internal data 650/// structures. 651void ValueTable::verifyRemoved(const Value *V) const { 652 for (DenseMap<Value*, uint32_t>::const_iterator 653 I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) { 654 assert(I->first != V && "Inst still occurs in value numbering map!"); 655 } 656} 657 658//===----------------------------------------------------------------------===// 659// GVN Pass 660//===----------------------------------------------------------------------===// 661 662namespace { 663 struct ValueNumberScope { 664 ValueNumberScope* parent; 665 DenseMap<uint32_t, Value*> table; 666 667 ValueNumberScope(ValueNumberScope* p) : parent(p) { } 668 }; 669} 670 671namespace { 672 673 class GVN : public FunctionPass { 674 bool runOnFunction(Function &F); 675 public: 676 static char ID; // Pass identification, replacement for typeid 677 explicit GVN(bool nopre = false, bool noloads = false) 678 : FunctionPass(&ID), NoPRE(nopre), NoLoads(noloads), MD(0) { } 679 680 private: 681 bool NoPRE; 682 bool NoLoads; 683 MemoryDependenceAnalysis *MD; 684 DominatorTree *DT; 685 686 ValueTable VN; 687 DenseMap<BasicBlock*, ValueNumberScope*> localAvail; 688 689 // This transformation requires dominator postdominator info 690 virtual void getAnalysisUsage(AnalysisUsage &AU) const { 691 AU.addRequired<DominatorTree>(); 692 if (!NoLoads) 693 AU.addRequired<MemoryDependenceAnalysis>(); 694 AU.addRequired<AliasAnalysis>(); 695 696 AU.addPreserved<DominatorTree>(); 697 AU.addPreserved<AliasAnalysis>(); 698 } 699 700 // Helper fuctions 701 // FIXME: eliminate or document these better 702 bool processLoad(LoadInst* L, 703 SmallVectorImpl<Instruction*> &toErase); 704 bool processInstruction(Instruction *I, 705 SmallVectorImpl<Instruction*> &toErase); 706 bool processNonLocalLoad(LoadInst* L, 707 SmallVectorImpl<Instruction*> &toErase); 708 bool processBlock(BasicBlock *BB); 709 void dump(DenseMap<uint32_t, Value*>& d); 710 bool iterateOnFunction(Function &F); 711 Value *CollapsePhi(PHINode* p); 712 bool performPRE(Function& F); 713 Value *lookupNumber(BasicBlock *BB, uint32_t num); 714 void cleanupGlobalSets(); 715 void verifyRemoved(const Instruction *I) const; 716 }; 717 718 char GVN::ID = 0; 719} 720 721// createGVNPass - The public interface to this file... 722FunctionPass *llvm::createGVNPass(bool NoPRE, bool NoLoads) { 723 return new GVN(NoPRE, NoLoads); 724} 725 726static RegisterPass<GVN> X("gvn", 727 "Global Value Numbering"); 728 729void GVN::dump(DenseMap<uint32_t, Value*>& d) { 730 printf("{\n"); 731 for (DenseMap<uint32_t, Value*>::iterator I = d.begin(), 732 E = d.end(); I != E; ++I) { 733 printf("%d\n", I->first); 734 I->second->dump(); 735 } 736 printf("}\n"); 737} 738 739static bool isSafeReplacement(PHINode* p, Instruction *inst) { 740 if (!isa<PHINode>(inst)) 741 return true; 742 743 for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end(); 744 UI != E; ++UI) 745 if (PHINode* use_phi = dyn_cast<PHINode>(UI)) 746 if (use_phi->getParent() == inst->getParent()) 747 return false; 748 749 return true; 750} 751 752Value *GVN::CollapsePhi(PHINode *PN) { 753 Value *ConstVal = PN->hasConstantValue(DT); 754 if (!ConstVal) return 0; 755 756 Instruction *Inst = dyn_cast<Instruction>(ConstVal); 757 if (!Inst) 758 return ConstVal; 759 760 if (DT->dominates(Inst, PN)) 761 if (isSafeReplacement(PN, Inst)) 762 return Inst; 763 return 0; 764} 765 766/// IsValueFullyAvailableInBlock - Return true if we can prove that the value 767/// we're analyzing is fully available in the specified block. As we go, keep 768/// track of which blocks we know are fully alive in FullyAvailableBlocks. This 769/// map is actually a tri-state map with the following values: 770/// 0) we know the block *is not* fully available. 771/// 1) we know the block *is* fully available. 772/// 2) we do not know whether the block is fully available or not, but we are 773/// currently speculating that it will be. 774/// 3) we are speculating for this block and have used that to speculate for 775/// other blocks. 776static bool IsValueFullyAvailableInBlock(BasicBlock *BB, 777 DenseMap<BasicBlock*, char> &FullyAvailableBlocks) { 778 // Optimistically assume that the block is fully available and check to see 779 // if we already know about this block in one lookup. 780 std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV = 781 FullyAvailableBlocks.insert(std::make_pair(BB, 2)); 782 783 // If the entry already existed for this block, return the precomputed value. 784 if (!IV.second) { 785 // If this is a speculative "available" value, mark it as being used for 786 // speculation of other blocks. 787 if (IV.first->second == 2) 788 IV.first->second = 3; 789 return IV.first->second != 0; 790 } 791 792 // Otherwise, see if it is fully available in all predecessors. 793 pred_iterator PI = pred_begin(BB), PE = pred_end(BB); 794 795 // If this block has no predecessors, it isn't live-in here. 796 if (PI == PE) 797 goto SpeculationFailure; 798 799 for (; PI != PE; ++PI) 800 // If the value isn't fully available in one of our predecessors, then it 801 // isn't fully available in this block either. Undo our previous 802 // optimistic assumption and bail out. 803 if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks)) 804 goto SpeculationFailure; 805 806 return true; 807 808// SpeculationFailure - If we get here, we found out that this is not, after 809// all, a fully-available block. We have a problem if we speculated on this and 810// used the speculation to mark other blocks as available. 811SpeculationFailure: 812 char &BBVal = FullyAvailableBlocks[BB]; 813 814 // If we didn't speculate on this, just return with it set to false. 815 if (BBVal == 2) { 816 BBVal = 0; 817 return false; 818 } 819 820 // If we did speculate on this value, we could have blocks set to 1 that are 821 // incorrect. Walk the (transitive) successors of this block and mark them as 822 // 0 if set to one. 823 SmallVector<BasicBlock*, 32> BBWorklist; 824 BBWorklist.push_back(BB); 825 826 while (!BBWorklist.empty()) { 827 BasicBlock *Entry = BBWorklist.pop_back_val(); 828 // Note that this sets blocks to 0 (unavailable) if they happen to not 829 // already be in FullyAvailableBlocks. This is safe. 830 char &EntryVal = FullyAvailableBlocks[Entry]; 831 if (EntryVal == 0) continue; // Already unavailable. 832 833 // Mark as unavailable. 834 EntryVal = 0; 835 836 for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I) 837 BBWorklist.push_back(*I); 838 } 839 840 return false; 841} 842 843 844/// CanCoerceMustAliasedValueToLoad - Return true if 845/// CoerceAvailableValueToLoadType will succeed. 846static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal, 847 const Type *LoadTy, 848 const TargetData &TD) { 849 // If the loaded or stored value is an first class array or struct, don't try 850 // to transform them. We need to be able to bitcast to integer. 851 if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy) || 852 isa<StructType>(StoredVal->getType()) || 853 isa<ArrayType>(StoredVal->getType())) 854 return false; 855 856 // The store has to be at least as big as the load. 857 if (TD.getTypeSizeInBits(StoredVal->getType()) < 858 TD.getTypeSizeInBits(LoadTy)) 859 return false; 860 861 return true; 862} 863 864 865/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and 866/// then a load from a must-aliased pointer of a different type, try to coerce 867/// the stored value. LoadedTy is the type of the load we want to replace and 868/// InsertPt is the place to insert new instructions. 869/// 870/// If we can't do it, return null. 871static Value *CoerceAvailableValueToLoadType(Value *StoredVal, 872 const Type *LoadedTy, 873 Instruction *InsertPt, 874 const TargetData &TD) { 875 if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD)) 876 return 0; 877 878 const Type *StoredValTy = StoredVal->getType(); 879 880 uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy); 881 uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy); 882 883 // If the store and reload are the same size, we can always reuse it. 884 if (StoreSize == LoadSize) { 885 if (isa<PointerType>(StoredValTy) && isa<PointerType>(LoadedTy)) { 886 // Pointer to Pointer -> use bitcast. 887 return new BitCastInst(StoredVal, LoadedTy, "", InsertPt); 888 } 889 890 // Convert source pointers to integers, which can be bitcast. 891 if (isa<PointerType>(StoredValTy)) { 892 StoredValTy = TD.getIntPtrType(StoredValTy->getContext()); 893 StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt); 894 } 895 896 const Type *TypeToCastTo = LoadedTy; 897 if (isa<PointerType>(TypeToCastTo)) 898 TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext()); 899 900 if (StoredValTy != TypeToCastTo) 901 StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt); 902 903 // Cast to pointer if the load needs a pointer type. 904 if (isa<PointerType>(LoadedTy)) 905 StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt); 906 907 return StoredVal; 908 } 909 910 // If the loaded value is smaller than the available value, then we can 911 // extract out a piece from it. If the available value is too small, then we 912 // can't do anything. 913 assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail"); 914 915 // Convert source pointers to integers, which can be manipulated. 916 if (isa<PointerType>(StoredValTy)) { 917 StoredValTy = TD.getIntPtrType(StoredValTy->getContext()); 918 StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt); 919 } 920 921 // Convert vectors and fp to integer, which can be manipulated. 922 if (!isa<IntegerType>(StoredValTy)) { 923 StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize); 924 StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt); 925 } 926 927 // If this is a big-endian system, we need to shift the value down to the low 928 // bits so that a truncate will work. 929 if (TD.isBigEndian()) { 930 Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize); 931 StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt); 932 } 933 934 // Truncate the integer to the right size now. 935 const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize); 936 StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt); 937 938 if (LoadedTy == NewIntTy) 939 return StoredVal; 940 941 // If the result is a pointer, inttoptr. 942 if (isa<PointerType>(LoadedTy)) 943 return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt); 944 945 // Otherwise, bitcast. 946 return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt); 947} 948 949/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can 950/// be expressed as a base pointer plus a constant offset. Return the base and 951/// offset to the caller. 952static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset, 953 const TargetData &TD) { 954 Operator *PtrOp = dyn_cast<Operator>(Ptr); 955 if (PtrOp == 0) return Ptr; 956 957 // Just look through bitcasts. 958 if (PtrOp->getOpcode() == Instruction::BitCast) 959 return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD); 960 961 // If this is a GEP with constant indices, we can look through it. 962 GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp); 963 if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr; 964 965 gep_type_iterator GTI = gep_type_begin(GEP); 966 for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E; 967 ++I, ++GTI) { 968 ConstantInt *OpC = cast<ConstantInt>(*I); 969 if (OpC->isZero()) continue; 970 971 // Handle a struct and array indices which add their offset to the pointer. 972 if (const StructType *STy = dyn_cast<StructType>(*GTI)) { 973 Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue()); 974 } else { 975 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()); 976 Offset += OpC->getSExtValue()*Size; 977 } 978 } 979 980 // Re-sign extend from the pointer size if needed to get overflow edge cases 981 // right. 982 unsigned PtrSize = TD.getPointerSizeInBits(); 983 if (PtrSize < 64) 984 Offset = (Offset << (64-PtrSize)) >> (64-PtrSize); 985 986 return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD); 987} 988 989 990/// AnalyzeLoadFromClobberingStore - This function is called when we have a 991/// memdep query of a load that ends up being a clobbering store. This means 992/// that the store *may* provide bits used by the load but we can't be sure 993/// because the pointers don't mustalias. Check this case to see if there is 994/// anything more we can do before we give up. This returns -1 if we have to 995/// give up, or a byte number in the stored value of the piece that feeds the 996/// load. 997static int AnalyzeLoadFromClobberingStore(LoadInst *L, StoreInst *DepSI, 998 const TargetData &TD) { 999 // If the loaded or stored value is an first class array or struct, don't try 1000 // to transform them. We need to be able to bitcast to integer. 1001 if (isa<StructType>(L->getType()) || isa<ArrayType>(L->getType()) || 1002 isa<StructType>(DepSI->getOperand(0)->getType()) || 1003 isa<ArrayType>(DepSI->getOperand(0)->getType())) 1004 return -1; 1005 1006 int64_t StoreOffset = 0, LoadOffset = 0; 1007 Value *StoreBase = 1008 GetBaseWithConstantOffset(DepSI->getPointerOperand(), StoreOffset, TD); 1009 Value *LoadBase = 1010 GetBaseWithConstantOffset(L->getPointerOperand(), LoadOffset, TD); 1011 if (StoreBase != LoadBase) 1012 return -1; 1013 1014 // If the load and store are to the exact same address, they should have been 1015 // a must alias. AA must have gotten confused. 1016 // FIXME: Study to see if/when this happens. 1017 if (LoadOffset == StoreOffset) { 1018#if 0 1019 errs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n" 1020 << "Base = " << *StoreBase << "\n" 1021 << "Store Ptr = " << *DepSI->getPointerOperand() << "\n" 1022 << "Store Offs = " << StoreOffset << " - " << *DepSI << "\n" 1023 << "Load Ptr = " << *L->getPointerOperand() << "\n" 1024 << "Load Offs = " << LoadOffset << " - " << *L << "\n\n"; 1025 errs() << "'" << L->getParent()->getParent()->getName() << "'" 1026 << *L->getParent(); 1027#endif 1028 return -1; 1029 } 1030 1031 // If the load and store don't overlap at all, the store doesn't provide 1032 // anything to the load. In this case, they really don't alias at all, AA 1033 // must have gotten confused. 1034 // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then 1035 // remove this check, as it is duplicated with what we have below. 1036 uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType()); 1037 uint64_t LoadSize = TD.getTypeSizeInBits(L->getType()); 1038 1039 if ((StoreSize & 7) | (LoadSize & 7)) 1040 return -1; 1041 StoreSize >>= 3; // Convert to bytes. 1042 LoadSize >>= 3; 1043 1044 1045 bool isAAFailure = false; 1046 if (StoreOffset < LoadOffset) { 1047 isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset; 1048 } else { 1049 isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset; 1050 } 1051 if (isAAFailure) { 1052#if 0 1053 errs() << "STORE LOAD DEP WITH COMMON BASE:\n" 1054 << "Base = " << *StoreBase << "\n" 1055 << "Store Ptr = " << *DepSI->getPointerOperand() << "\n" 1056 << "Store Offs = " << StoreOffset << " - " << *DepSI << "\n" 1057 << "Load Ptr = " << *L->getPointerOperand() << "\n" 1058 << "Load Offs = " << LoadOffset << " - " << *L << "\n\n"; 1059 errs() << "'" << L->getParent()->getParent()->getName() << "'" 1060 << *L->getParent(); 1061#endif 1062 return -1; 1063 } 1064 1065 // If the Load isn't completely contained within the stored bits, we don't 1066 // have all the bits to feed it. We could do something crazy in the future 1067 // (issue a smaller load then merge the bits in) but this seems unlikely to be 1068 // valuable. 1069 if (StoreOffset > LoadOffset || 1070 StoreOffset+StoreSize < LoadOffset+LoadSize) 1071 return -1; 1072 1073 // Okay, we can do this transformation. Return the number of bytes into the 1074 // store that the load is. 1075 return LoadOffset-StoreOffset; 1076} 1077 1078 1079/// GetStoreValueForLoad - This function is called when we have a 1080/// memdep query of a load that ends up being a clobbering store. This means 1081/// that the store *may* provide bits used by the load but we can't be sure 1082/// because the pointers don't mustalias. Check this case to see if there is 1083/// anything more we can do before we give up. 1084static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset, 1085 const Type *LoadTy, 1086 Instruction *InsertPt, const TargetData &TD){ 1087 LLVMContext &Ctx = SrcVal->getType()->getContext(); 1088 1089 uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8; 1090 uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8; 1091 1092 1093 // Compute which bits of the stored value are being used by the load. Convert 1094 // to an integer type to start with. 1095 if (isa<PointerType>(SrcVal->getType())) 1096 SrcVal = new PtrToIntInst(SrcVal, TD.getIntPtrType(Ctx), "tmp", InsertPt); 1097 if (!isa<IntegerType>(SrcVal->getType())) 1098 SrcVal = new BitCastInst(SrcVal, IntegerType::get(Ctx, StoreSize*8), 1099 "tmp", InsertPt); 1100 1101 // Shift the bits to the least significant depending on endianness. 1102 unsigned ShiftAmt; 1103 if (TD.isLittleEndian()) { 1104 ShiftAmt = Offset*8; 1105 } else { 1106 ShiftAmt = (StoreSize-LoadSize-Offset)*8; 1107 } 1108 1109 if (ShiftAmt) 1110 SrcVal = BinaryOperator::CreateLShr(SrcVal, 1111 ConstantInt::get(SrcVal->getType(), ShiftAmt), "tmp", InsertPt); 1112 1113 if (LoadSize != StoreSize) 1114 SrcVal = new TruncInst(SrcVal, IntegerType::get(Ctx, LoadSize*8), 1115 "tmp", InsertPt); 1116 1117 return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD); 1118} 1119 1120struct AvailableValueInBlock { 1121 /// BB - The basic block in question. 1122 BasicBlock *BB; 1123 /// V - The value that is live out of the block. 1124 Value *V; 1125 /// Offset - The byte offset in V that is interesting for the load query. 1126 unsigned Offset; 1127 1128 static AvailableValueInBlock get(BasicBlock *BB, Value *V, 1129 unsigned Offset = 0) { 1130 AvailableValueInBlock Res; 1131 Res.BB = BB; 1132 Res.V = V; 1133 Res.Offset = Offset; 1134 return Res; 1135 } 1136}; 1137 1138/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock, 1139/// construct SSA form, allowing us to eliminate LI. This returns the value 1140/// that should be used at LI's definition site. 1141static Value *ConstructSSAForLoadSet(LoadInst *LI, 1142 SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock, 1143 const TargetData *TD, 1144 AliasAnalysis *AA) { 1145 SmallVector<PHINode*, 8> NewPHIs; 1146 SSAUpdater SSAUpdate(&NewPHIs); 1147 SSAUpdate.Initialize(LI); 1148 1149 const Type *LoadTy = LI->getType(); 1150 1151 for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) { 1152 BasicBlock *BB = ValuesPerBlock[i].BB; 1153 Value *AvailableVal = ValuesPerBlock[i].V; 1154 unsigned Offset = ValuesPerBlock[i].Offset; 1155 1156 if (SSAUpdate.HasValueForBlock(BB)) 1157 continue; 1158 1159 if (AvailableVal->getType() != LoadTy) { 1160 assert(TD && "Need target data to handle type mismatch case"); 1161 AvailableVal = GetStoreValueForLoad(AvailableVal, Offset, LoadTy, 1162 BB->getTerminator(), *TD); 1163 1164 if (Offset) { 1165 DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\n" 1166 << *ValuesPerBlock[i].V << '\n' 1167 << *AvailableVal << '\n' << "\n\n\n"); 1168 } 1169 1170 1171 DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\n" 1172 << *ValuesPerBlock[i].V << '\n' 1173 << *AvailableVal << '\n' << "\n\n\n"); 1174 } 1175 1176 SSAUpdate.AddAvailableValue(BB, AvailableVal); 1177 } 1178 1179 // Perform PHI construction. 1180 Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent()); 1181 1182 // If new PHI nodes were created, notify alias analysis. 1183 if (isa<PointerType>(V->getType())) 1184 for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i) 1185 AA->copyValue(LI, NewPHIs[i]); 1186 1187 return V; 1188} 1189 1190/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are 1191/// non-local by performing PHI construction. 1192bool GVN::processNonLocalLoad(LoadInst *LI, 1193 SmallVectorImpl<Instruction*> &toErase) { 1194 // Find the non-local dependencies of the load. 1195 SmallVector<MemoryDependenceAnalysis::NonLocalDepEntry, 64> Deps; 1196 MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(), 1197 Deps); 1198 //DEBUG(errs() << "INVESTIGATING NONLOCAL LOAD: " 1199 // << Deps.size() << *LI << '\n'); 1200 1201 // If we had to process more than one hundred blocks to find the 1202 // dependencies, this load isn't worth worrying about. Optimizing 1203 // it will be too expensive. 1204 if (Deps.size() > 100) 1205 return false; 1206 1207 // If we had a phi translation failure, we'll have a single entry which is a 1208 // clobber in the current block. Reject this early. 1209 if (Deps.size() == 1 && Deps[0].second.isClobber()) { 1210 DEBUG( 1211 errs() << "GVN: non-local load "; 1212 WriteAsOperand(errs(), LI); 1213 errs() << " is clobbered by " << *Deps[0].second.getInst() << '\n'; 1214 ); 1215 return false; 1216 } 1217 1218 // Filter out useless results (non-locals, etc). Keep track of the blocks 1219 // where we have a value available in repl, also keep track of whether we see 1220 // dependencies that produce an unknown value for the load (such as a call 1221 // that could potentially clobber the load). 1222 SmallVector<AvailableValueInBlock, 16> ValuesPerBlock; 1223 SmallVector<BasicBlock*, 16> UnavailableBlocks; 1224 1225 const TargetData *TD = 0; 1226 1227 for (unsigned i = 0, e = Deps.size(); i != e; ++i) { 1228 BasicBlock *DepBB = Deps[i].first; 1229 MemDepResult DepInfo = Deps[i].second; 1230 1231 if (DepInfo.isClobber()) { 1232 // If the dependence is to a store that writes to a superset of the bits 1233 // read by the load, we can extract the bits we need for the load from the 1234 // stored value. 1235 if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) { 1236 if (TD == 0) 1237 TD = getAnalysisIfAvailable<TargetData>(); 1238 if (TD) { 1239 int Offset = AnalyzeLoadFromClobberingStore(LI, DepSI, *TD); 1240 if (Offset != -1) { 1241 ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, 1242 DepSI->getOperand(0), 1243 Offset)); 1244 continue; 1245 } 1246 } 1247 } 1248 1249 // FIXME: Handle memset/memcpy. 1250 UnavailableBlocks.push_back(DepBB); 1251 continue; 1252 } 1253 1254 Instruction *DepInst = DepInfo.getInst(); 1255 1256 // Loading the allocation -> undef. 1257 if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) { 1258 ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, 1259 UndefValue::get(LI->getType()))); 1260 continue; 1261 } 1262 1263 // Loading immediately after lifetime begin or end -> undef. 1264 if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) { 1265 if (II->getIntrinsicID() == Intrinsic::lifetime_start || 1266 II->getIntrinsicID() == Intrinsic::lifetime_end) { 1267 ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, 1268 UndefValue::get(LI->getType()))); 1269 } 1270 } 1271 1272 if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) { 1273 // Reject loads and stores that are to the same address but are of 1274 // different types if we have to. 1275 if (S->getOperand(0)->getType() != LI->getType()) { 1276 if (TD == 0) 1277 TD = getAnalysisIfAvailable<TargetData>(); 1278 1279 // If the stored value is larger or equal to the loaded value, we can 1280 // reuse it. 1281 if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0), 1282 LI->getType(), *TD)) { 1283 UnavailableBlocks.push_back(DepBB); 1284 continue; 1285 } 1286 } 1287 1288 ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, 1289 S->getOperand(0))); 1290 continue; 1291 } 1292 1293 if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) { 1294 // If the types mismatch and we can't handle it, reject reuse of the load. 1295 if (LD->getType() != LI->getType()) { 1296 if (TD == 0) 1297 TD = getAnalysisIfAvailable<TargetData>(); 1298 1299 // If the stored value is larger or equal to the loaded value, we can 1300 // reuse it. 1301 if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){ 1302 UnavailableBlocks.push_back(DepBB); 1303 continue; 1304 } 1305 } 1306 ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD)); 1307 continue; 1308 } 1309 1310 UnavailableBlocks.push_back(DepBB); 1311 continue; 1312 } 1313 1314 // If we have no predecessors that produce a known value for this load, exit 1315 // early. 1316 if (ValuesPerBlock.empty()) return false; 1317 1318 // If all of the instructions we depend on produce a known value for this 1319 // load, then it is fully redundant and we can use PHI insertion to compute 1320 // its value. Insert PHIs and remove the fully redundant value now. 1321 if (UnavailableBlocks.empty()) { 1322 DEBUG(errs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n'); 1323 1324 // Perform PHI construction. 1325 Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, 1326 VN.getAliasAnalysis()); 1327 LI->replaceAllUsesWith(V); 1328 1329 if (isa<PHINode>(V)) 1330 V->takeName(LI); 1331 if (isa<PointerType>(V->getType())) 1332 MD->invalidateCachedPointerInfo(V); 1333 toErase.push_back(LI); 1334 NumGVNLoad++; 1335 return true; 1336 } 1337 1338 if (!EnablePRE || !EnableLoadPRE) 1339 return false; 1340 1341 // Okay, we have *some* definitions of the value. This means that the value 1342 // is available in some of our (transitive) predecessors. Lets think about 1343 // doing PRE of this load. This will involve inserting a new load into the 1344 // predecessor when it's not available. We could do this in general, but 1345 // prefer to not increase code size. As such, we only do this when we know 1346 // that we only have to insert *one* load (which means we're basically moving 1347 // the load, not inserting a new one). 1348 1349 SmallPtrSet<BasicBlock *, 4> Blockers; 1350 for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i) 1351 Blockers.insert(UnavailableBlocks[i]); 1352 1353 // Lets find first basic block with more than one predecessor. Walk backwards 1354 // through predecessors if needed. 1355 BasicBlock *LoadBB = LI->getParent(); 1356 BasicBlock *TmpBB = LoadBB; 1357 1358 bool isSinglePred = false; 1359 bool allSingleSucc = true; 1360 while (TmpBB->getSinglePredecessor()) { 1361 isSinglePred = true; 1362 TmpBB = TmpBB->getSinglePredecessor(); 1363 if (!TmpBB) // If haven't found any, bail now. 1364 return false; 1365 if (TmpBB == LoadBB) // Infinite (unreachable) loop. 1366 return false; 1367 if (Blockers.count(TmpBB)) 1368 return false; 1369 if (TmpBB->getTerminator()->getNumSuccessors() != 1) 1370 allSingleSucc = false; 1371 } 1372 1373 assert(TmpBB); 1374 LoadBB = TmpBB; 1375 1376 // If we have a repl set with LI itself in it, this means we have a loop where 1377 // at least one of the values is LI. Since this means that we won't be able 1378 // to eliminate LI even if we insert uses in the other predecessors, we will 1379 // end up increasing code size. Reject this by scanning for LI. 1380 for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) 1381 if (ValuesPerBlock[i].V == LI) 1382 return false; 1383 1384 if (isSinglePred) { 1385 bool isHot = false; 1386 for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) 1387 if (Instruction *I = dyn_cast<Instruction>(ValuesPerBlock[i].V)) 1388 // "Hot" Instruction is in some loop (because it dominates its dep. 1389 // instruction). 1390 if (DT->dominates(LI, I)) { 1391 isHot = true; 1392 break; 1393 } 1394 1395 // We are interested only in "hot" instructions. We don't want to do any 1396 // mis-optimizations here. 1397 if (!isHot) 1398 return false; 1399 } 1400 1401 // Okay, we have some hope :). Check to see if the loaded value is fully 1402 // available in all but one predecessor. 1403 // FIXME: If we could restructure the CFG, we could make a common pred with 1404 // all the preds that don't have an available LI and insert a new load into 1405 // that one block. 1406 BasicBlock *UnavailablePred = 0; 1407 1408 DenseMap<BasicBlock*, char> FullyAvailableBlocks; 1409 for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) 1410 FullyAvailableBlocks[ValuesPerBlock[i].BB] = true; 1411 for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i) 1412 FullyAvailableBlocks[UnavailableBlocks[i]] = false; 1413 1414 for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB); 1415 PI != E; ++PI) { 1416 if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks)) 1417 continue; 1418 1419 // If this load is not available in multiple predecessors, reject it. 1420 if (UnavailablePred && UnavailablePred != *PI) 1421 return false; 1422 UnavailablePred = *PI; 1423 } 1424 1425 assert(UnavailablePred != 0 && 1426 "Fully available value should be eliminated above!"); 1427 1428 // We don't currently handle critical edges :( 1429 if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) { 1430 DEBUG(errs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '" 1431 << UnavailablePred->getName() << "': " << *LI << '\n'); 1432 return false; 1433 } 1434 1435 // Do PHI translation to get its value in the predecessor if necessary. The 1436 // returned pointer (if non-null) is guaranteed to dominate UnavailablePred. 1437 // 1438 // FIXME: This may insert a computation, but we don't tell scalar GVN 1439 // optimization stuff about it. How do we do this? 1440 Value *LoadPtr = 1441 MD->InsertPHITranslatedPointer(LI->getOperand(0), LoadBB, 1442 UnavailablePred, TD, *DT); 1443 1444 // If we couldn't find or insert a computation of this phi translated value, 1445 // we fail PRE. 1446 if (LoadPtr == 0) { 1447 DEBUG(errs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: " 1448 << *LI->getOperand(0) << "\n"); 1449 return false; 1450 } 1451 1452 // Make sure it is valid to move this load here. We have to watch out for: 1453 // @1 = getelementptr (i8* p, ... 1454 // test p and branch if == 0 1455 // load @1 1456 // It is valid to have the getelementptr before the test, even if p can be 0, 1457 // as getelementptr only does address arithmetic. 1458 // If we are not pushing the value through any multiple-successor blocks 1459 // we do not have this case. Otherwise, check that the load is safe to 1460 // put anywhere; this can be improved, but should be conservatively safe. 1461 if (!allSingleSucc && 1462 !isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator())) 1463 return false; 1464 1465 // Okay, we can eliminate this load by inserting a reload in the predecessor 1466 // and using PHI construction to get the value in the other predecessors, do 1467 // it. 1468 DEBUG(errs() << "GVN REMOVING PRE LOAD: " << *LI << '\n'); 1469 1470 Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false, 1471 LI->getAlignment(), 1472 UnavailablePred->getTerminator()); 1473 1474 // Add the newly created load. 1475 ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,NewLoad)); 1476 1477 // Perform PHI construction. 1478 Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, 1479 VN.getAliasAnalysis()); 1480 LI->replaceAllUsesWith(V); 1481 if (isa<PHINode>(V)) 1482 V->takeName(LI); 1483 if (isa<PointerType>(V->getType())) 1484 MD->invalidateCachedPointerInfo(V); 1485 toErase.push_back(LI); 1486 NumPRELoad++; 1487 return true; 1488} 1489 1490/// processLoad - Attempt to eliminate a load, first by eliminating it 1491/// locally, and then attempting non-local elimination if that fails. 1492bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) { 1493 if (!MD) 1494 return false; 1495 1496 if (L->isVolatile()) 1497 return false; 1498 1499 // ... to a pointer that has been loaded from before... 1500 MemDepResult Dep = MD->getDependency(L); 1501 1502 // If the value isn't available, don't do anything! 1503 if (Dep.isClobber()) { 1504 // FIXME: We should handle memset/memcpy/memmove as dependent instructions 1505 // to forward the value if available. 1506 //if (isa<MemIntrinsic>(Dep.getInst())) 1507 //errs() << "LOAD DEPENDS ON MEM: " << *L << "\n" << *Dep.getInst()<<"\n\n"; 1508 1509 // Check to see if we have something like this: 1510 // store i32 123, i32* %P 1511 // %A = bitcast i32* %P to i8* 1512 // %B = gep i8* %A, i32 1 1513 // %C = load i8* %B 1514 // 1515 // We could do that by recognizing if the clobber instructions are obviously 1516 // a common base + constant offset, and if the previous store (or memset) 1517 // completely covers this load. This sort of thing can happen in bitfield 1518 // access code. 1519 if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) 1520 if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) { 1521 int Offset = AnalyzeLoadFromClobberingStore(L, DepSI, *TD); 1522 if (Offset != -1) { 1523 Value *AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset, 1524 L->getType(), L, *TD); 1525 DEBUG(errs() << "GVN COERCED STORE BITS:\n" << *DepSI << '\n' 1526 << *AvailVal << '\n' << *L << "\n\n\n"); 1527 1528 // Replace the load! 1529 L->replaceAllUsesWith(AvailVal); 1530 if (isa<PointerType>(AvailVal->getType())) 1531 MD->invalidateCachedPointerInfo(AvailVal); 1532 toErase.push_back(L); 1533 NumGVNLoad++; 1534 return true; 1535 } 1536 } 1537 1538 DEBUG( 1539 // fast print dep, using operator<< on instruction would be too slow 1540 errs() << "GVN: load "; 1541 WriteAsOperand(errs(), L); 1542 Instruction *I = Dep.getInst(); 1543 errs() << " is clobbered by " << *I << '\n'; 1544 ); 1545 return false; 1546 } 1547 1548 // If it is defined in another block, try harder. 1549 if (Dep.isNonLocal()) 1550 return processNonLocalLoad(L, toErase); 1551 1552 Instruction *DepInst = Dep.getInst(); 1553 if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) { 1554 Value *StoredVal = DepSI->getOperand(0); 1555 1556 // The store and load are to a must-aliased pointer, but they may not 1557 // actually have the same type. See if we know how to reuse the stored 1558 // value (depending on its type). 1559 const TargetData *TD = 0; 1560 if (StoredVal->getType() != L->getType()) { 1561 if ((TD = getAnalysisIfAvailable<TargetData>())) { 1562 StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(), 1563 L, *TD); 1564 if (StoredVal == 0) 1565 return false; 1566 1567 DEBUG(errs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal 1568 << '\n' << *L << "\n\n\n"); 1569 } 1570 else 1571 return false; 1572 } 1573 1574 // Remove it! 1575 L->replaceAllUsesWith(StoredVal); 1576 if (isa<PointerType>(StoredVal->getType())) 1577 MD->invalidateCachedPointerInfo(StoredVal); 1578 toErase.push_back(L); 1579 NumGVNLoad++; 1580 return true; 1581 } 1582 1583 if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) { 1584 Value *AvailableVal = DepLI; 1585 1586 // The loads are of a must-aliased pointer, but they may not actually have 1587 // the same type. See if we know how to reuse the previously loaded value 1588 // (depending on its type). 1589 const TargetData *TD = 0; 1590 if (DepLI->getType() != L->getType()) { 1591 if ((TD = getAnalysisIfAvailable<TargetData>())) { 1592 AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), L,*TD); 1593 if (AvailableVal == 0) 1594 return false; 1595 1596 DEBUG(errs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal 1597 << "\n" << *L << "\n\n\n"); 1598 } 1599 else 1600 return false; 1601 } 1602 1603 // Remove it! 1604 L->replaceAllUsesWith(AvailableVal); 1605 if (isa<PointerType>(DepLI->getType())) 1606 MD->invalidateCachedPointerInfo(DepLI); 1607 toErase.push_back(L); 1608 NumGVNLoad++; 1609 return true; 1610 } 1611 1612 // If this load really doesn't depend on anything, then we must be loading an 1613 // undef value. This can happen when loading for a fresh allocation with no 1614 // intervening stores, for example. 1615 if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) { 1616 L->replaceAllUsesWith(UndefValue::get(L->getType())); 1617 toErase.push_back(L); 1618 NumGVNLoad++; 1619 return true; 1620 } 1621 1622 // If this load occurs either right after a lifetime begin or a lifetime end, 1623 // then the loaded value is undefined. 1624 if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) { 1625 if (II->getIntrinsicID() == Intrinsic::lifetime_start || 1626 II->getIntrinsicID() == Intrinsic::lifetime_end) { 1627 L->replaceAllUsesWith(UndefValue::get(L->getType())); 1628 toErase.push_back(L); 1629 NumGVNLoad++; 1630 return true; 1631 } 1632 } 1633 1634 return false; 1635} 1636 1637Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) { 1638 DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB); 1639 if (I == localAvail.end()) 1640 return 0; 1641 1642 ValueNumberScope *Locals = I->second; 1643 while (Locals) { 1644 DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num); 1645 if (I != Locals->table.end()) 1646 return I->second; 1647 Locals = Locals->parent; 1648 } 1649 1650 return 0; 1651} 1652 1653 1654/// processInstruction - When calculating availability, handle an instruction 1655/// by inserting it into the appropriate sets 1656bool GVN::processInstruction(Instruction *I, 1657 SmallVectorImpl<Instruction*> &toErase) { 1658 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 1659 bool Changed = processLoad(LI, toErase); 1660 1661 if (!Changed) { 1662 unsigned Num = VN.lookup_or_add(LI); 1663 localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI)); 1664 } 1665 1666 return Changed; 1667 } 1668 1669 uint32_t NextNum = VN.getNextUnusedValueNumber(); 1670 unsigned Num = VN.lookup_or_add(I); 1671 1672 if (BranchInst *BI = dyn_cast<BranchInst>(I)) { 1673 localAvail[I->getParent()]->table.insert(std::make_pair(Num, I)); 1674 1675 if (!BI->isConditional() || isa<Constant>(BI->getCondition())) 1676 return false; 1677 1678 Value *BranchCond = BI->getCondition(); 1679 uint32_t CondVN = VN.lookup_or_add(BranchCond); 1680 1681 BasicBlock *TrueSucc = BI->getSuccessor(0); 1682 BasicBlock *FalseSucc = BI->getSuccessor(1); 1683 1684 if (TrueSucc->getSinglePredecessor()) 1685 localAvail[TrueSucc]->table[CondVN] = 1686 ConstantInt::getTrue(TrueSucc->getContext()); 1687 if (FalseSucc->getSinglePredecessor()) 1688 localAvail[FalseSucc]->table[CondVN] = 1689 ConstantInt::getFalse(TrueSucc->getContext()); 1690 1691 return false; 1692 1693 // Allocations are always uniquely numbered, so we can save time and memory 1694 // by fast failing them. 1695 } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) { 1696 localAvail[I->getParent()]->table.insert(std::make_pair(Num, I)); 1697 return false; 1698 } 1699 1700 // Collapse PHI nodes 1701 if (PHINode* p = dyn_cast<PHINode>(I)) { 1702 Value *constVal = CollapsePhi(p); 1703 1704 if (constVal) { 1705 p->replaceAllUsesWith(constVal); 1706 if (MD && isa<PointerType>(constVal->getType())) 1707 MD->invalidateCachedPointerInfo(constVal); 1708 VN.erase(p); 1709 1710 toErase.push_back(p); 1711 } else { 1712 localAvail[I->getParent()]->table.insert(std::make_pair(Num, I)); 1713 } 1714 1715 // If the number we were assigned was a brand new VN, then we don't 1716 // need to do a lookup to see if the number already exists 1717 // somewhere in the domtree: it can't! 1718 } else if (Num == NextNum) { 1719 localAvail[I->getParent()]->table.insert(std::make_pair(Num, I)); 1720 1721 // Perform fast-path value-number based elimination of values inherited from 1722 // dominators. 1723 } else if (Value *repl = lookupNumber(I->getParent(), Num)) { 1724 // Remove it! 1725 VN.erase(I); 1726 I->replaceAllUsesWith(repl); 1727 if (MD && isa<PointerType>(repl->getType())) 1728 MD->invalidateCachedPointerInfo(repl); 1729 toErase.push_back(I); 1730 return true; 1731 1732 } else { 1733 localAvail[I->getParent()]->table.insert(std::make_pair(Num, I)); 1734 } 1735 1736 return false; 1737} 1738 1739/// runOnFunction - This is the main transformation entry point for a function. 1740bool GVN::runOnFunction(Function& F) { 1741 if (!NoLoads) 1742 MD = &getAnalysis<MemoryDependenceAnalysis>(); 1743 DT = &getAnalysis<DominatorTree>(); 1744 VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>()); 1745 VN.setMemDep(MD); 1746 VN.setDomTree(DT); 1747 1748 bool Changed = false; 1749 bool ShouldContinue = true; 1750 1751 // Merge unconditional branches, allowing PRE to catch more 1752 // optimization opportunities. 1753 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) { 1754 BasicBlock *BB = FI; 1755 ++FI; 1756 bool removedBlock = MergeBlockIntoPredecessor(BB, this); 1757 if (removedBlock) NumGVNBlocks++; 1758 1759 Changed |= removedBlock; 1760 } 1761 1762 unsigned Iteration = 0; 1763 1764 while (ShouldContinue) { 1765 DEBUG(errs() << "GVN iteration: " << Iteration << "\n"); 1766 ShouldContinue = iterateOnFunction(F); 1767 Changed |= ShouldContinue; 1768 ++Iteration; 1769 } 1770 1771 if (EnablePRE) { 1772 bool PREChanged = true; 1773 while (PREChanged) { 1774 PREChanged = performPRE(F); 1775 Changed |= PREChanged; 1776 } 1777 } 1778 // FIXME: Should perform GVN again after PRE does something. PRE can move 1779 // computations into blocks where they become fully redundant. Note that 1780 // we can't do this until PRE's critical edge splitting updates memdep. 1781 // Actually, when this happens, we should just fully integrate PRE into GVN. 1782 1783 cleanupGlobalSets(); 1784 1785 return Changed; 1786} 1787 1788 1789bool GVN::processBlock(BasicBlock *BB) { 1790 // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and 1791 // incrementing BI before processing an instruction). 1792 SmallVector<Instruction*, 8> toErase; 1793 bool ChangedFunction = false; 1794 1795 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); 1796 BI != BE;) { 1797 ChangedFunction |= processInstruction(BI, toErase); 1798 if (toErase.empty()) { 1799 ++BI; 1800 continue; 1801 } 1802 1803 // If we need some instructions deleted, do it now. 1804 NumGVNInstr += toErase.size(); 1805 1806 // Avoid iterator invalidation. 1807 bool AtStart = BI == BB->begin(); 1808 if (!AtStart) 1809 --BI; 1810 1811 for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(), 1812 E = toErase.end(); I != E; ++I) { 1813 DEBUG(errs() << "GVN removed: " << **I << '\n'); 1814 if (MD) MD->removeInstruction(*I); 1815 (*I)->eraseFromParent(); 1816 DEBUG(verifyRemoved(*I)); 1817 } 1818 toErase.clear(); 1819 1820 if (AtStart) 1821 BI = BB->begin(); 1822 else 1823 ++BI; 1824 } 1825 1826 return ChangedFunction; 1827} 1828 1829/// performPRE - Perform a purely local form of PRE that looks for diamond 1830/// control flow patterns and attempts to perform simple PRE at the join point. 1831bool GVN::performPRE(Function &F) { 1832 bool Changed = false; 1833 SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit; 1834 DenseMap<BasicBlock*, Value*> predMap; 1835 for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()), 1836 DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) { 1837 BasicBlock *CurrentBlock = *DI; 1838 1839 // Nothing to PRE in the entry block. 1840 if (CurrentBlock == &F.getEntryBlock()) continue; 1841 1842 for (BasicBlock::iterator BI = CurrentBlock->begin(), 1843 BE = CurrentBlock->end(); BI != BE; ) { 1844 Instruction *CurInst = BI++; 1845 1846 if (isa<AllocaInst>(CurInst) || 1847 isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) || 1848 CurInst->getType()->isVoidTy() || 1849 CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() || 1850 isa<DbgInfoIntrinsic>(CurInst)) 1851 continue; 1852 1853 uint32_t ValNo = VN.lookup(CurInst); 1854 1855 // Look for the predecessors for PRE opportunities. We're 1856 // only trying to solve the basic diamond case, where 1857 // a value is computed in the successor and one predecessor, 1858 // but not the other. We also explicitly disallow cases 1859 // where the successor is its own predecessor, because they're 1860 // more complicated to get right. 1861 unsigned NumWith = 0; 1862 unsigned NumWithout = 0; 1863 BasicBlock *PREPred = 0; 1864 predMap.clear(); 1865 1866 for (pred_iterator PI = pred_begin(CurrentBlock), 1867 PE = pred_end(CurrentBlock); PI != PE; ++PI) { 1868 // We're not interested in PRE where the block is its 1869 // own predecessor, on in blocks with predecessors 1870 // that are not reachable. 1871 if (*PI == CurrentBlock) { 1872 NumWithout = 2; 1873 break; 1874 } else if (!localAvail.count(*PI)) { 1875 NumWithout = 2; 1876 break; 1877 } 1878 1879 DenseMap<uint32_t, Value*>::iterator predV = 1880 localAvail[*PI]->table.find(ValNo); 1881 if (predV == localAvail[*PI]->table.end()) { 1882 PREPred = *PI; 1883 NumWithout++; 1884 } else if (predV->second == CurInst) { 1885 NumWithout = 2; 1886 } else { 1887 predMap[*PI] = predV->second; 1888 NumWith++; 1889 } 1890 } 1891 1892 // Don't do PRE when it might increase code size, i.e. when 1893 // we would need to insert instructions in more than one pred. 1894 if (NumWithout != 1 || NumWith == 0) 1895 continue; 1896 1897 // Don't do PRE across indirect branch. 1898 if (isa<IndirectBrInst>(PREPred->getTerminator())) 1899 continue; 1900 1901 // We can't do PRE safely on a critical edge, so instead we schedule 1902 // the edge to be split and perform the PRE the next time we iterate 1903 // on the function. 1904 unsigned SuccNum = 0; 1905 for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors(); 1906 i != e; ++i) 1907 if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) { 1908 SuccNum = i; 1909 break; 1910 } 1911 1912 if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) { 1913 toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum)); 1914 continue; 1915 } 1916 1917 // Instantiate the expression the in predecessor that lacked it. 1918 // Because we are going top-down through the block, all value numbers 1919 // will be available in the predecessor by the time we need them. Any 1920 // that weren't original present will have been instantiated earlier 1921 // in this loop. 1922 Instruction *PREInstr = CurInst->clone(); 1923 bool success = true; 1924 for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) { 1925 Value *Op = PREInstr->getOperand(i); 1926 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op)) 1927 continue; 1928 1929 if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) { 1930 PREInstr->setOperand(i, V); 1931 } else { 1932 success = false; 1933 break; 1934 } 1935 } 1936 1937 // Fail out if we encounter an operand that is not available in 1938 // the PRE predecessor. This is typically because of loads which 1939 // are not value numbered precisely. 1940 if (!success) { 1941 delete PREInstr; 1942 DEBUG(verifyRemoved(PREInstr)); 1943 continue; 1944 } 1945 1946 PREInstr->insertBefore(PREPred->getTerminator()); 1947 PREInstr->setName(CurInst->getName() + ".pre"); 1948 predMap[PREPred] = PREInstr; 1949 VN.add(PREInstr, ValNo); 1950 NumGVNPRE++; 1951 1952 // Update the availability map to include the new instruction. 1953 localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr)); 1954 1955 // Create a PHI to make the value available in this block. 1956 PHINode* Phi = PHINode::Create(CurInst->getType(), 1957 CurInst->getName() + ".pre-phi", 1958 CurrentBlock->begin()); 1959 for (pred_iterator PI = pred_begin(CurrentBlock), 1960 PE = pred_end(CurrentBlock); PI != PE; ++PI) 1961 Phi->addIncoming(predMap[*PI], *PI); 1962 1963 VN.add(Phi, ValNo); 1964 localAvail[CurrentBlock]->table[ValNo] = Phi; 1965 1966 CurInst->replaceAllUsesWith(Phi); 1967 if (MD && isa<PointerType>(Phi->getType())) 1968 MD->invalidateCachedPointerInfo(Phi); 1969 VN.erase(CurInst); 1970 1971 DEBUG(errs() << "GVN PRE removed: " << *CurInst << '\n'); 1972 if (MD) MD->removeInstruction(CurInst); 1973 CurInst->eraseFromParent(); 1974 DEBUG(verifyRemoved(CurInst)); 1975 Changed = true; 1976 } 1977 } 1978 1979 for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator 1980 I = toSplit.begin(), E = toSplit.end(); I != E; ++I) 1981 SplitCriticalEdge(I->first, I->second, this); 1982 1983 return Changed || toSplit.size(); 1984} 1985 1986/// iterateOnFunction - Executes one iteration of GVN 1987bool GVN::iterateOnFunction(Function &F) { 1988 cleanupGlobalSets(); 1989 1990 for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()), 1991 DE = df_end(DT->getRootNode()); DI != DE; ++DI) { 1992 if (DI->getIDom()) 1993 localAvail[DI->getBlock()] = 1994 new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]); 1995 else 1996 localAvail[DI->getBlock()] = new ValueNumberScope(0); 1997 } 1998 1999 // Top-down walk of the dominator tree 2000 bool Changed = false; 2001#if 0 2002 // Needed for value numbering with phi construction to work. 2003 ReversePostOrderTraversal<Function*> RPOT(&F); 2004 for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(), 2005 RE = RPOT.end(); RI != RE; ++RI) 2006 Changed |= processBlock(*RI); 2007#else 2008 for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()), 2009 DE = df_end(DT->getRootNode()); DI != DE; ++DI) 2010 Changed |= processBlock(DI->getBlock()); 2011#endif 2012 2013 return Changed; 2014} 2015 2016void GVN::cleanupGlobalSets() { 2017 VN.clear(); 2018 2019 for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator 2020 I = localAvail.begin(), E = localAvail.end(); I != E; ++I) 2021 delete I->second; 2022 localAvail.clear(); 2023} 2024 2025/// verifyRemoved - Verify that the specified instruction does not occur in our 2026/// internal data structures. 2027void GVN::verifyRemoved(const Instruction *Inst) const { 2028 VN.verifyRemoved(Inst); 2029 2030 // Walk through the value number scope to make sure the instruction isn't 2031 // ferreted away in it. 2032 for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator 2033 I = localAvail.begin(), E = localAvail.end(); I != E; ++I) { 2034 const ValueNumberScope *VNS = I->second; 2035 2036 while (VNS) { 2037 for (DenseMap<uint32_t, Value*>::const_iterator 2038 II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) { 2039 assert(II->second != Inst && "Inst still in value numbering scope!"); 2040 } 2041 2042 VNS = VNS->parent; 2043 } 2044 } 2045} 2046