GVN.cpp revision fb6e701f08d7ec065222abbbc6727dc80de3d17e
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <cstdio>
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
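/// For example, in the (illustrative) IR below, %a and %b produce identical
/// Expressions -- same opcode, type, and operand value numbers -- so both
/// receive the same value number, allowing GVN to replace %b with %a:
///
///   %a = add i32 %x, %y
///   %b = add i32 %x, %y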
namespace {
  struct Expression {
    enum ExpressionOpcode { ADD, FADD, SUB, FSUB, MUL, FMUL,
                            UDIV, SDIV, FDIV, UREM, SREM,
                            FREM, SHL, LSHR, ASHR, AND, OR, XOR, ICMPEQ,
                            ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
                            ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
                            FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
                            FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
                            FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
                            SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI,
                            FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT,
                            PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, CONSTANT,
                            INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    // Initialize type and function so the EMPTY and TOMBSTONE map keys never
    // carry uninitialized fields.
    Expression(ExpressionOpcode o) : opcode(o), type(0), function(0) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(BinaryOperator* BO);
      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression::ExpressionOpcode getOpcode(CastInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(Constant* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      unsigned size();
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    // Fold the type into the hash without discarding the opcode's
    // contribution.
    hash = hash * 37 + ((unsigned)((uintptr_t)e.type >> 4) ^
                        (unsigned)((uintptr_t)e.type >> 9));
    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
  static bool isPod() { return true; }
};
}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//
Expression::ExpressionOpcode ValueTable::getOpcode(BinaryOperator* BO) {
  switch(BO->getOpcode()) {
  default: // THIS SHOULD NEVER HAPPEN
    llvm_unreachable("Binary operator with unknown opcode?");
  case Instruction::Add:  return Expression::ADD;
  case Instruction::FAdd: return Expression::FADD;
  case Instruction::Sub:  return Expression::SUB;
  case Instruction::FSub: return Expression::FSUB;
  case Instruction::Mul:  return Expression::MUL;
  case Instruction::FMul: return Expression::FMUL;
  case Instruction::UDiv: return Expression::UDIV;
  case Instruction::SDiv: return Expression::SDIV;
  case Instruction::FDiv: return Expression::FDIV;
  case Instruction::URem: return Expression::UREM;
  case Instruction::SRem: return Expression::SREM;
  case Instruction::FRem: return Expression::FREM;
  case Instruction::Shl:  return Expression::SHL;
  case Instruction::LShr: return Expression::LSHR;
  case Instruction::AShr: return Expression::ASHR;
  case Instruction::And:  return Expression::AND;
  case Instruction::Or:   return Expression::OR;
  case Instruction::Xor:  return Expression::XOR;
  }
}

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression::ExpressionOpcode ValueTable::getOpcode(CastInst* C) {
  switch(C->getOpcode()) {
  default: // THIS SHOULD NEVER HAPPEN
    llvm_unreachable("Cast operator with unknown opcode?");
  case Instruction::Trunc:    return Expression::TRUNC;
  case Instruction::ZExt:     return Expression::ZEXT;
  case Instruction::SExt:     return Expression::SEXT;
  case Instruction::FPToUI:   return Expression::FPTOUI;
  case Instruction::FPToSI:   return Expression::FPTOSI;
  case Instruction::UIToFP:   return Expression::UITOFP;
  case Instruction::SIToFP:   return Expression::SITOFP;
  case Instruction::FPTrunc:  return Expression::FPTRUNC;
  case Instruction::FPExt:    return Expression::FPEXT;
  case Instruction::PtrToInt: return Expression::PTRTOINT;
  case Instruction::IntToPtr: return Expression::INTTOPTR;
  case Instruction::BitCast:  return Expression::BITCAST;
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = getOpcode(BO);

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}
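// A GEP is numbered from the value numbers of its pointer operand and each
// index, so (illustrative IR) the two GEPs below fold to one value number:
//
//   %p1 = getelementptr i32* %base, i64 %i
//   %p2 = getelementptr i32* %base, i64 %i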
Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}
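// Note that for extractvalue/insertvalue the aggregate (and inserted value)
// operands are value-numbered, while the constant indices are pushed into
// varargs verbatim.  For example (illustrative),
// "extractvalue {i32, i32} %agg, 1" yields varargs = { VN(%agg), 1 }.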
//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumOperands() != C->getNumOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 1; i < C->getNumOperands(); ++i) {
        uint32_t c_vn = lookup_or_add(C->getOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const MemoryDependenceAnalysis::NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->second.isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->second.isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->second.getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->first, C->getParent())) {
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumOperands() != C->getNumOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 1; i < C->getNumOperands(); ++i) {
      uint32_t c_vn = lookup_or_add(C->getOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value.  Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    GVN(bool nopre = false) : FunctionPass(&ID), NoPRE(nopre) { }

  private:
    bool NoPRE;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // This transformation requires dominator tree info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better.
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
  };

  char GVN::ID = 0;
}
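// Usage sketch (assumes the standard opt driver): the pass and the flags
// declared above can be exercised from the command line with, e.g.:
//   opt -gvn input.bc -o output.bc
//   opt -gvn -enable-load-pre=false input.bc -o output.bc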
// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoPRE) { return new GVN(NoPRE); }

static RegisterPass<GVN> X("gvn",
                           "Global Value Numbering");

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  printf("{\n");
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    printf("%u\n", I->first);
    I->second->dump();
  }
  printf("}\n");
}

static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully available in FullyAvailableBlocks.
/// This map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
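///
/// For example (illustrative CFG), querying block D below first optimistically
/// marks D as state 2, recurses into its predecessors B and C, and only ends
/// up reporting D available once the value is proven available on every path:
///
///        A  (value defined here)
///       / \
///      B   C
///       \ /
///        D  (query starts here)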
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, bool> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them
  // as 0 if they are set to 1.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  while (!BBWorklist.empty()) {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  }

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy) ||
      isa<StructType>(StoredVal->getType()) ||
      isa<ArrayType>(StoredVal->getType()))
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}
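// Example (illustrative): a "store i64 %v, i64* %p" followed by a load of i32
// from the same address can reuse %v.  The coercion below emits a trunc (plus
// an lshr by 32 bits on big-endian targets, where the interesting bytes sit
// in the high half) instead of reloading from memory.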
/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (isa<PointerType>(StoredValTy) && isa<PointerType>(LoadedTy)) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (isa<PointerType>(StoredValTy)) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (isa<PointerType>(TypeToCastTo))
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (isa<PointerType>(LoadedTy))
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (isa<PointerType>(StoredValTy)) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!isa<IntegerType>(StoredValTy)) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (isa<PointerType>(LoadedTy))
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}


/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't must-alias.  Check this case to see if there is
/// anything more we can do before we give up.  This returns -1 if we have to
/// give up, or a byte number in the stored value of the piece that feeds the
/// load.
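///
/// For example (illustrative IR):
///   store i32 %x, i32* %P
///   %A = bitcast i32* %P to i8*
///   %B = getelementptr i8* %A, i32 1
///   %C = load i8* %B
/// Both pointers decompose to base %P, with StoreOffset 0 and LoadOffset 1,
/// so this returns 1: the load reads the byte at offset 1 of the stored value.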
static int AnalyzeLoadFromClobberingStore(LoadInst *L, StoreInst *DepSI,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (isa<StructType>(L->getType()) || isa<ArrayType>(L->getType()) ||
      isa<StructType>(DepSI->getOperand(0)->getType()) ||
      isa<ArrayType>(DepSI->getOperand(0)->getType()))
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase =
    GetBaseWithConstantOffset(DepSI->getPointerOperand(), StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(L->getPointerOperand(), LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.
  if (LoadOffset == StoreOffset) {
#if 0
    errs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *DepSI->getPointerOperand() << "\n"
           << "Store Offs = " << StoreOffset << " - " << *DepSI << "\n"
           << "Load Ptr   = " << *L->getPointerOperand() << "\n"
           << "Load Offs  = " << LoadOffset << " - " << *L << "\n\n";
    errs() << "'" << L->getParent()->getParent()->getName() << "'"
           << *L->getParent();
#endif
    return -1;
  }

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614.  Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  uint64_t LoadSize = TD.getTypeSizeInBits(L->getType());

  if ((StoreSize & 7) | (LoadSize & 7))
    return -1;
  StoreSize >>= 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset) {
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  } else {
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
  }
  if (isAAFailure) {
#if 0
    errs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *DepSI->getPointerOperand() << "\n"
           << "Store Offs = " << StoreOffset << " - " << *DepSI << "\n"
           << "Load Ptr   = " << *L->getPointerOperand() << "\n"
           << "Load Offs  = " << LoadOffset << " - " << *L << "\n\n";
    errs() << "'" << L->getParent()->getParent()->getName() << "'"
           << *L->getParent();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
/// GetStoreValueForLoad - Given the stored value SrcVal and the byte Offset
/// into it (as computed by AnalyzeLoadFromClobberingStore), extract the piece
/// of the stored value that feeds a load of type LoadTy.  InsertPt is the
/// place to insert new instructions.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8;
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;


  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (isa<PointerType>(SrcVal->getType()))
    SrcVal = new PtrToIntInst(SrcVal, TD.getIntPtrType(Ctx), "tmp", InsertPt);
  if (!isa<IntegerType>(SrcVal->getType()))
    SrcVal = new BitCastInst(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                             "tmp", InsertPt);

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian()) {
    ShiftAmt = Offset*8;
  } else {
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;
  }

  if (ShiftAmt)
    SrcVal = BinaryOperator::CreateLShr(SrcVal,
                ConstantInt::get(SrcVal->getType(), ShiftAmt), "tmp", InsertPt);

  if (LoadSize != StoreSize)
    SrcVal = new TruncInst(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                           "tmp", InsertPt);

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}
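// Worked example (illustrative): StoreSize = 8 bytes, LoadSize = 2 bytes,
// Offset = 2.  Little-endian: ShiftAmt = Offset*8 = 16, so the value is
// shifted right 16 bits and truncated to i16.  Big-endian: ShiftAmt =
// (8-2-2)*8 = 32, since the bytes of interest sit higher in the word.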
struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  /// V - The value that is live out of the block.
  Value *V;
  /// Offset - The byte offset in V that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.V = V;
    Res.Offset = Offset;
    return Res;
  }
};

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                        SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     AliasAnalysis *AA) {
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI);

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    BasicBlock *BB = ValuesPerBlock[i].BB;
    Value *AvailableVal = ValuesPerBlock[i].V;
    unsigned Offset = ValuesPerBlock[i].Offset;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    if (AvailableVal->getType() != LoadTy) {
      assert(TD && "Need target data to handle type mismatch case");
      AvailableVal = GetStoreValueForLoad(AvailableVal, Offset, LoadTy,
                                          BB->getTerminator(), *TD);

      DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\n"
                   << *ValuesPerBlock[i].V << '\n'
                   << *AvailableVal << '\n' << "\n\n\n");
    }

    SSAUpdate.AddAvailableValue(BB, AvailableVal);
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (isa<PointerType>(V->getType()))
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<MemoryDependenceAnalysis::NonLocalDepEntry, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(errs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].second.isClobber()) {
    DEBUG(
      errs() << "GVN: non-local load ";
      WriteAsOperand(errs(), LI);
      errs() << " is clobbered by " << *Deps[0].second.getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].first;
    MemDepResult DepInfo = Deps[i].second;

    if (DepInfo.isClobber()) {
      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD) {
          int Offset = AnalyzeLoadFromClobberingStore(LI, DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                        DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // FIXME: Handle memset/memcpy.
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    // Loading immediately after lifetime begin or end -> undef.
    if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
        // The undef value fully covers this dependency; don't also record the
        // block as unavailable below.
        continue;
      }
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is at least as large as the loaded value, we
        // can reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the
      // load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the previously loaded value is at least as large as the value we
        // need, we can reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),
                                                        *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(errs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (isa<PointerType>(V->getType()))
      MD->invalidateCachedPointerInfo(V);
    toErase.push_back(LI);
    NumGVNLoad++;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (!TmpBB) // If we haven't found one, bail now.
      return false;
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // If we have a repl set with LI itself in it, this means we have a loop where
  // at least one of the values is LI.  Since this means that we won't be able
  // to eliminate LI even if we insert uses in the other predecessors, we will
  // end up increasing code size.  Reject this by scanning for LI.
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    if (ValuesPerBlock[i].V == LI)
      return false;

  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
      if (Instruction *I = dyn_cast<Instruction>(ValuesPerBlock[i].V))
        // A "hot" Instruction is one in some loop (because it dominates its
        // dependent instruction).
        if (DT->dominates(LI, I)) {
          isHot = true;
          break;
        }

    // We are interested only in "hot" instructions.  We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Okay, we have some hope :).  Check to see if the loaded value is fully
  // available in all but one predecessor.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  BasicBlock *UnavailablePred = 0;

  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      continue;

    // If the value is unavailable in more than one unique predecessor, reject
    // the PRE: we would have to insert more than one load.
    if (UnavailablePred && UnavailablePred != *PI)
      return false;
    UnavailablePred = *PI;
  }

  assert(UnavailablePred != 0 &&
         "Fully available value should be eliminated above!");

  // If the loaded pointer is a PHI node defined in this block, do PHI
  // translation to get its value in the predecessor.
  Value *LoadPtr = LI->getOperand(0)->DoPHITranslation(LoadBB, UnavailablePred);

  // Make sure the value is live in the predecessor.  If it was defined by a
  // non-PHI instruction in this block, we don't know how to recompute it above.
  if (Instruction *LPInst = dyn_cast<Instruction>(LoadPtr))
    if (!DT->dominates(LPInst->getParent(), UnavailablePred)) {
      DEBUG(errs() << "COULDN'T PRE LOAD BECAUSE PTR IS UNAVAILABLE IN PRED: "
                   << *LPInst << '\n' << *LI << "\n");
      return false;
    }

  // We don't currently handle critical edges :(
  if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
    DEBUG(errs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
                 << UnavailablePred->getName() << "': " << *LI << '\n');
    return false;
  }

  // Make sure it is valid to move this load here.  We have to watch out for:
  //  %1 = getelementptr (i8* p, ...
  //  test p and branch if == 0
  //  load %1
  // It is valid to have the getelementptr before the test, even if p can be 0,
  // as getelementptr only does address arithmetic.
  // If we are not pushing the value through any multiple-successor blocks
  // we do not have this case.  Otherwise, check that the load is safe to
  // put anywhere; this can be improved, but should be conservatively safe.
  if (!allSingleSucc &&
      !isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator()))
    return false;

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(errs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');

  Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                LI->getAlignment(),
                                UnavailablePred->getTerminator());

  // Add the newly created load.
  ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,NewLoad));

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (isa<PointerType>(V->getType()))
    MD->invalidateCachedPointerInfo(V);
  toErase.push_back(LI);
  NumPRELoad++;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (L->isVolatile())
    return false;

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // FIXME: We should handle memset/memcpy/memmove as dependent instructions
    // to forward the value if available.
    //if (isa<MemIntrinsic>(Dep.getInst()))
    //errs() << "LOAD DEPENDS ON MEM: " << *L << "\n" << *Dep.getInst()<<"\n\n";

    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We could do that by recognizing if the clobber instructions are obviously
    // a common base + constant offset, and if the previous store (or memset)
    // completely covers this load.  This sort of thing can happen in bitfield
    // access code.
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L, DepSI, *TD);
        if (Offset != -1) {
          Value *AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
                                                 L->getType(), L, *TD);
          DEBUG(errs() << "GVN COERCED STORE BITS:\n" << *DepSI << '\n'
                       << *AvailVal << '\n' << *L << "\n\n\n");

          // Replace the load!
          L->replaceAllUsesWith(AvailVal);
          if (isa<PointerType>(AvailVal->getType()))
            MD->invalidateCachedPointerInfo(AvailVal);
          toErase.push_back(L);
          NumGVNLoad++;
          return true;
        }
      }

    DEBUG(
      // fast print dep, using operator<< on instruction would be too slow
      errs() << "GVN: load ";
      WriteAsOperand(errs(), L);
      Instruction *I = Dep.getInst();
      errs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(errs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      }
      else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (isa<PointerType>(StoredVal->getType()))
      MD->invalidateCachedPointerInfo(StoredVal);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(errs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      }
      else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (isa<PointerType>(DepLI->getType()))
      MD->invalidateCachedPointerInfo(DepLI);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading for a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load occurs either right after a lifetime begin or a lifetime end,
  // then the loaded value is undefined.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
        II->getIntrinsicID() == Intrinsic::lifetime_end) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }
  }

  return false;
}

Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator T = Locals->table.find(num);
    if (T != Locals->table.end())
      return T->second;
    Locals = Locals->parent;
  }

  return 0;
}

/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());

    return false;

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast-failing them.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }

  // Collapse PHI nodes.
  if (PHINode *p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (isa<PointerType>(constVal->getType()))
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
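  // (NextNum was captured from the value table before the lookup_or_add call
  // above, so Num == NextNum holds exactly when this instruction was assigned
  // a brand new number.)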
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (isa<PointerType>(repl->getType()))
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) NumGVNBlocks++;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(errs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}


bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(errs() << "GVN removed: " << **I << '\n');
      MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
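/// A sketch of the pattern this catches (illustrative IR, hypothetical block
/// names):
///   Pred1: %v1 = add i32 %a, %b     ; value available here
///          br label %Join
///   Pred2: br label %Join           ; value not available here
///   Join:  %v2 = add i32 %a, %b     ; partially redundant
/// The add is cloned into Pred2 and %v2 is replaced by a phi of the two
/// predecessor values, making it fully redundant.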
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look for the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (*PI == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(*PI)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
          localAvail[*PI]->table.find(ValNo);
        if (predV == localAvail[*PI]->table.end()) {
          PREPred = *PI;
          NumWithout++;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[*PI] = predV->second;
          NumWith++;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned SuccNum = 0;
      for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
           i != e; ++i)
        if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
          SuccNum = i;
          break;
        }

      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
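      // For example (hypothetical values): if CurInst is
      //   %y = add i32 %t, 1
      // and %t has value number 7, the clone's operand is rewritten below to
      // whichever value realizes number 7 in PREPred, if one exists.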
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      NumGVNPRE++;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));

      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI)
        Phi->addIncoming(predMap[*PI], *PI);

      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (isa<PointerType>(Phi->getType()))
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(errs() << "GVN PRE removed: " << *CurInst << '\n');
      MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
       I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
    SplitCriticalEdge(I->first, I->second, this);

  return Changed || !toSplit.empty();
}

/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
        new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
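/// Note that this is only meaningful in asserts builds; callers invoke it
/// under DEBUG() after erasing an instruction (see processBlock and
/// performPRE above).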
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
           II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;
    }
  }
}