GVN.cpp revision b3f927f6ddde88e12ec74dd7622d28df587e768b
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <cstdio>
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
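///
/// For example, given
///     %x = add i32 %a, %b
///     %y = add i32 %a, %b
/// both adds build the same Expression (opcode ADD, type i32, and the value
/// numbers of %a and %b as operands), so %x and %y receive the same value
/// number and are recognized as redundant.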
namespace {
  struct Expression {
    enum ExpressionOpcode { ADD, FADD, SUB, FSUB, MUL, FMUL,
                            UDIV, SDIV, FDIV, UREM, SREM,
                            FREM, SHL, LSHR, ASHR, AND, OR, XOR, ICMPEQ,
                            ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
                            ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
                            FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
                            FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
                            FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
                            SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI,
                            FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT,
                            PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, CONSTANT,
                            INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(BinaryOperator* BO);
      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression::ExpressionOpcode getOpcode(CastInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(Constant* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      unsigned size();
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9));

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
  static bool isPod() { return true; }
};
}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//
Expression::ExpressionOpcode ValueTable::getOpcode(BinaryOperator* BO) {
  switch(BO->getOpcode()) {
  default: // THIS SHOULD NEVER HAPPEN
    llvm_unreachable("Binary operator with unknown opcode?");
  case Instruction::Add:  return Expression::ADD;
  case Instruction::FAdd: return Expression::FADD;
  case Instruction::Sub:  return Expression::SUB;
  case Instruction::FSub: return Expression::FSUB;
  case Instruction::Mul:  return Expression::MUL;
  case Instruction::FMul: return Expression::FMUL;
  case Instruction::UDiv: return Expression::UDIV;
  case Instruction::SDiv: return Expression::SDIV;
  case Instruction::FDiv: return Expression::FDIV;
  case Instruction::URem: return Expression::UREM;
  case Instruction::SRem: return Expression::SREM;
  case Instruction::FRem: return Expression::FREM;
  case Instruction::Shl:  return Expression::SHL;
  case Instruction::LShr: return Expression::LSHR;
  case Instruction::AShr: return Expression::ASHR;
  case Instruction::And:  return Expression::AND;
  case Instruction::Or:   return Expression::OR;
  case Instruction::Xor:  return Expression::XOR;
  }
}

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression::ExpressionOpcode ValueTable::getOpcode(CastInst* C) {
  switch(C->getOpcode()) {
  default: // THIS SHOULD NEVER HAPPEN
    llvm_unreachable("Cast operator with unknown opcode?");
  case Instruction::Trunc:    return Expression::TRUNC;
  case Instruction::ZExt:     return Expression::ZEXT;
  case Instruction::SExt:     return Expression::SEXT;
  case Instruction::FPToUI:   return Expression::FPTOUI;
  case Instruction::FPToSI:   return Expression::FPTOSI;
  case Instruction::UIToFP:   return Expression::UITOFP;
  case Instruction::SIToFP:   return Expression::SITOFP;
  case Instruction::FPTrunc:  return Expression::FPTRUNC;
  case Instruction::FPExt:    return Expression::FPEXT;
  case Instruction::PtrToInt: return Expression::PTRTOINT;
  case Instruction::IntToPtr: return Expression::INTTOPTR;
  case Instruction::BitCast:  return Expression::BITCAST;
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = getOpcode(BO);

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumOperands() != C->getNumOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 1; i < C->getNumOperands(); ++i) {
        uint32_t c_vn = lookup_or_add(C->getOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const MemoryDependenceAnalysis::NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->second.isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->second.isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->second.getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->first, C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumOperands() != C->getNumOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 1; i < C->getNumOperands(); ++i) {
      uint32_t c_vn = lookup_or_add(C->getOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or :
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool nopre = false, bool noloads = false)
      : FunctionPass(&ID), NoPRE(nopre), NoLoads(noloads), MD(0) { }

  private:
    bool NoPRE;
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoPRE, bool NoLoads) {
  return new GVN(NoPRE, NoLoads);
}

static RegisterPass<GVN> X("gvn",
                           "Global Value Numbering");

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  printf("{\n");
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    printf("%d\n", I->first);
    I->second->dump();
  }
  printf("}\n");
}

static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully available in FullyAvailableBlocks.
/// This map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
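  // (A value of 2 here means our speculative answer for BB was never consumed
  // to mark any other block available, so resetting just this entry suffices;
  // a value of 3 means other blocks may hold a 1 derived from us, and the
  // successor walk below has to undo that.)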
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if they are set to 1.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  while (!BBWorklist.empty()) {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  }

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy) ||
      isa<StructType>(StoredVal->getType()) ||
      isa<ArrayType>(StoredVal->getType()))
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}


/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (isa<PointerType>(StoredValTy) && isa<PointerType>(LoadedTy)) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (isa<PointerType>(StoredValTy)) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (isa<PointerType>(TypeToCastTo))
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (isa<PointerType>(LoadedTy))
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
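  // For example, an i64 store feeding an i32 load: the i64 is shifted right
  // (on big-endian targets) and then truncated below so the load's 32 bits
  // end up in the result.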
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (isa<PointerType>(StoredValTy)) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!isa<IntegerType>(StoredValTy)) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (isa<PointerType>(LoadedTy))
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}


/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(LoadInst *L, Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (isa<StructType>(L->getType()) || isa<ArrayType>(L->getType()))
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(L->getPointerOperand(), LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.
  if (LoadOffset == StoreOffset) {
#if 0
    errs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
    << "Base       = " << *StoreBase << "\n"
    << "Store Ptr  = " << *WritePtr << "\n"
    << "Store Offs = " << StoreOffset << "\n"
    << "Load Ptr   = " << *L->getPointerOperand() << "\n"
    << "Load Offs  = " << LoadOffset << " - " << *L << "\n\n";
    errs() << "'" << L->getParent()->getParent()->getName() << "'"
    << *L->getParent();
    abort();
#endif
    return -1;
  }

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(L->getType());

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset) {
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  } else {
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
  }
  if (isAAFailure) {
#if 0
    errs() << "STORE LOAD DEP WITH COMMON BASE:\n"
    << "Base       = " << *StoreBase << "\n"
    << "Store Ptr  = " << *WritePtr << "\n"
    << "Store Offs = " << StoreOffset << "\n"
    << "Load Ptr   = " << *L->getPointerOperand() << "\n"
    << "Load Offs  = " << LoadOffset << " - " << *L << "\n\n";
    errs() << "'" << L->getParent()->getParent()->getName() << "'"
    << *L->getParent();
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load and then merge the bits in) but this seems unlikely
  // to be valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(LoadInst *L, StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (isa<StructType>(DepSI->getOperand(0)->getType()) ||
      isa<ArrayType>(DepSI->getOperand(0)->getType()))
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(StorePtr->getType());
  return AnalyzeLoadFromClobberingWrite(L, StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(LoadInst *L, MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(L, MI->getDest(), MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset =
    AnalyzeLoadFromClobberingWrite(L, MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(L->getType()));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}


/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8;
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;


  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (isa<PointerType>(SrcVal->getType()))
    SrcVal = new PtrToIntInst(SrcVal, TD.getIntPtrType(Ctx), "tmp", InsertPt);
  if (!isa<IntegerType>(SrcVal->getType()))
    SrcVal = new BitCastInst(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                             "tmp", InsertPt);

  // Shift the bits to the least significant depending on endianness.
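  // For example, with a 4-byte store feeding a 1-byte load at Offset 1, the
  // shift is 1*8 = 8 bits on little-endian and (4-1-1)*8 = 16 bits on
  // big-endian, so the desired byte lands in the low bits before the trunc.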
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = BinaryOperator::CreateLShr(SrcVal,
                ConstantInt::get(SrcVal->getType(), ShiftAmt), "tmp", InsertPt);

  if (LoadSize != StoreSize)
    SrcVal = new TruncInst(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                           "tmp", InsertPt);

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // See if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}



struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }
};

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     AliasAnalysis *AA) {
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI);

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    unsigned Offset = AV.Offset;

    Value *AvailableVal;
    if (AV.isSimpleValue()) {
      AvailableVal = AV.getSimpleValue();
      if (AvailableVal->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        AvailableVal = GetStoreValueForLoad(AvailableVal, Offset, LoadTy,
                                            BB->getTerminator(), *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
              << *AV.getSimpleValue() << '\n'
              << *AvailableVal << '\n' << "\n\n\n");
      }
    } else {
      AvailableVal = GetMemInstValueForLoad(AV.getMemIntrinValue(), Offset,
                                            LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
            << "  " << *AV.getMemIntrinValue() << '\n'
            << *AvailableVal << '\n' << "\n\n\n");
    }
    SSAUpdate.AddAvailableValue(BB, AvailableVal);
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (isa<PointerType>(V->getType()))
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}

static bool isLifetimeStart(Instruction *Inst) {
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
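  // (Each Deps entry pairs a basic block with the MemDepResult for the load's
  // pointer in that block: a defining store/load, a clobber, or non-local.)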
  SmallVector<MemoryDependenceAnalysis::NonLocalDepEntry, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(errs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].second.isClobber()) {
    DEBUG(
      errs() << "GVN: non-local load ";
      WriteAsOperand(errs(), LI);
      errs() << " is clobbered by " << *Deps[0].second.getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].first;
    MemDepResult DepInfo = Deps[i].second;

    if (DepInfo.isClobber()) {
      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD) {
          int Offset = AnalyzeLoadFromClobberingStore(LI, DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                           DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI, DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
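        // (CanCoerceMustAliasedValueToLoad also rejects first-class struct
        // and array types, which we cannot bitcast to an integer.)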
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(errs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (isa<PointerType>(V->getType()))
      MD->invalidateCachedPointerInfo(V);
    toErase.push_back(LI);
    NumGVNLoad++;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (!TmpBB) // If we haven't found any, bail now.
      return false;
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // If we have a repl set with LI itself in it, this means we have a loop where
  // at least one of the values is LI.
  // Since this means that we won't be able to eliminate LI even if we insert
  // uses in the other predecessors, we will end up increasing code size.
  // Reject this by scanning for LI.
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    if (ValuesPerBlock[i].isSimpleValue() &&
        ValuesPerBlock[i].getSimpleValue() == LI)
      return false;

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions. We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Okay, we have some hope :). Check to see if the loaded value is fully
  // available in all but one predecessor.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  BasicBlock *UnavailablePred = 0;

  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      continue;

    // If this load is not available in multiple predecessors, reject it.
    if (UnavailablePred && UnavailablePred != *PI)
      return false;
    UnavailablePred = *PI;
  }

  assert(UnavailablePred != 0 &&
         "Fully available value should be eliminated above!");

  // We don't currently handle critical edges :(
  if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
    DEBUG(errs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
                 << UnavailablePred->getName() << "': " << *LI << '\n');
    return false;
  }

  // Do PHI translation to get its value in the predecessor if necessary.  The
  // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
  //
  SmallVector<Instruction*, 8> NewInsts;

  // If all preds have a single successor, then we know it is safe to insert the
  // load on the pred (?!?), so we can insert code to materialize the pointer if
  // it is not available.
  PHITransAddr Address(LI->getOperand(0), TD);
  Value *LoadPtr = 0;
  if (allSingleSucc) {
    LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                *DT, NewInsts);
  } else {
    Address.PHITranslateValue(LoadBB, UnavailablePred);
    LoadPtr = Address.getAddr();

    // Make sure the value is live in the predecessor.
    if (Instruction *Inst = dyn_cast_or_null<Instruction>(LoadPtr))
      if (!DT->dominates(Inst->getParent(), UnavailablePred))
        LoadPtr = 0;
  }

  // If we couldn't find or insert a computation of this phi translated value,
  // we fail PRE.
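  // For example, if the load address in LoadBB is %p = phi [%a, %Pred], ...,
  // the translated address in %Pred is simply %a; a GEP of a phi may require
  // inserting a corresponding GEP of %a into %Pred (hence NewInsts).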

  // If we couldn't find or insert a computation of this phi translated value,
  // we fail PRE.
  if (LoadPtr == 0) {
    assert(NewInsts.empty() && "Shouldn't insert insts on failure");
    DEBUG(errs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                 << *LI->getOperand(0) << "\n");
    return false;
  }

  // Assign value numbers to these new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  // Make sure it is valid to move this load here.  We have to watch out for:
  //  @1 = getelementptr (i8* p, ...
  //  test p and branch if == 0
  //  load @1
  // It is valid to have the getelementptr before the test, even if p can be 0,
  // as getelementptr only does address arithmetic.
  // If we are not pushing the value through any multiple-successor blocks
  // we do not have this case.  Otherwise, check that the load is safe to
  // put anywhere; this can be improved, but should be conservatively safe.
  if (!allSingleSucc &&
      // FIXME: REEVALUATE THIS.
      !isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator())) {
    assert(NewInsts.empty() && "Should not have inserted instructions");
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors.
  // Do it now.
  DEBUG(errs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          errs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                LI->getAlignment(),
                                UnavailablePred->getTerminator());

  // Add the newly created load.
  ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                      NewLoad));

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (isa<PointerType>(V->getType()))
    MD->invalidateCachedPointerInfo(V);
  toErase.push_back(LI);
  NumPRELoad++;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // Find what this load depends on: typically a store to, or a load of, the
  // same pointer earlier in the function.
  MemDepResult Dep = MD->getDependency(L);

  // If the dependency is a clobber, the value is not directly available, but
  // we may still be able to forward one on from a covering store or memset.
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We could handle that case by recognizing that the clobbering
    // instructions share an obvious common base + constant offset, and that
    // the previous store (or memset) completely covers this load.  This sort
    // of thing can happen in bitfield access code.
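    //
    // In the example above, the load reads byte 1 of the stored i32, so on a
    // little-endian target the forwarded value would be roughly
    // trunc(lshr(123, 8)) to i8, i.e. 0.  The helpers below compute such
    // offsets and extract the covered bits when the types permit.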
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L, DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L, DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(),
                                            L, *TD);
      }
    }

    if (AvailVal) {
      DEBUG(errs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
                   << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (isa<PointerType>(AvailVal->getType()))
        MD->invalidateCachedPointerInfo(AvailVal);
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }

    DEBUG(
      // Fast-print the dep; using operator<< on the instruction would be too
      // slow.
      errs() << "GVN: load ";
      WriteAsOperand(errs(), L);
      Instruction *I = Dep.getInst();
      errs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(errs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (isa<PointerType>(StoredVal->getType()))
      MD->invalidateCachedPointerInfo(StoredVal);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(errs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else
        return false;
    }
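
    // A hypothetical example of such a reuse, where an available i32 load is
    // coerced with a bitcast to satisfy a must-aliased float load:
    //
    //   %a = load i32* %P
    //   %b = load float* %Q      ; %Q must-aliases %P
    //
    // becomes "%b = bitcast i32 %a to float", and the second load goes away.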

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (isa<PointerType>(DepLI->getType()))
      MD->invalidateCachedPointerInfo(DepLI);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading from a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load occurs right after a lifetime begin, the loaded value is
  // undefined.
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }
  }

  return false;
}

Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  // Walk up the chain of dominating scopes until we either find the number
  // or run out of parents.
  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
    if (I != Locals->table.end())
      return I->second;
    Locals = Locals->parent;
  }

  return 0;
}


/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(TrueSucc->getContext());
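
    // Note, for illustration: given a hypothetical "br i1 %cmp, label %T,
    // label %F", any later lookup of %cmp's value number inside %T resolves
    // to true, and inside %F to false, provided each successor is reached
    // only along this edge.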
    return false;

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast-failing them.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }

  // Collapse PHI nodes.
  if (PHINode* p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && isa<PointerType>(constVal->getType()))
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && isa<PointerType>(repl->getType()))
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) NumGVNBlocks++;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(errs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}


bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation: remember whether we were at the start of
    // the block, otherwise step back one instruction, and re-derive BI after
    // the deletions below.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(errs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look at the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (*PI == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(*PI)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
          localAvail[*PI]->table.find(ValNo);
        if (predV == localAvail[*PI]->table.end()) {
          PREPred = *PI;
          NumWithout++;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[*PI] = predV->second;
          NumWith++;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
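      //
      // For illustration: the edge PREPred->CurrentBlock is critical when
      // PREPred has multiple successors and CurrentBlock has multiple
      // predecessors.  Inserting into PREPred itself would then compute the
      // value on paths that bypass CurrentBlock, so we split the edge and let
      // a later iteration insert into the new block instead.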
      unsigned SuccNum = 0;
      for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
           i != e; ++i)
        if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
          SuccNum = i;
          break;
        }

      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      NumGVNPRE++;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));

      // Create a PHI to make the value available in this block.
      PHINode* Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI)
        Phi->addIncoming(predMap[*PI], *PI);

      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && isa<PointerType>(Phi->getType()))
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(errs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
       I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
    SplitCriticalEdge(I->first, I->second, this);

  return Changed || toSplit.size();
}

/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
        new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }

  // Top-down walk of the dominator tree.
  bool Changed = false;
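
  // Roughly: the disabled RPO walk below would visit a block only after all
  // of its non-back-edge predecessors, which value numbering with phi
  // construction would require; the dominator-tree DFS actually used only
  // guarantees that a block's dominators have been visited first.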
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
           II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;
    }
  }
}