GVN.cpp revision a81e241fcc1effa9e82fcd7efdee5f6cacce1c7e
1//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This pass performs global value numbering to eliminate fully redundant
11// instructions.  It also performs simple dead load elimination.
12//
13// Note that this pass does the value numbering itself; it does not use the
14// ValueNumbering analysis passes.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "gvn"
19#include "llvm/Transforms/Scalar.h"
20#include "llvm/BasicBlock.h"
21#include "llvm/Constants.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/GlobalVariable.h"
24#include "llvm/Function.h"
25#include "llvm/IntrinsicInst.h"
26#include "llvm/LLVMContext.h"
27#include "llvm/Operator.h"
28#include "llvm/Value.h"
29#include "llvm/ADT/DenseMap.h"
30#include "llvm/ADT/DepthFirstIterator.h"
31#include "llvm/ADT/PostOrderIterator.h"
32#include "llvm/ADT/SmallPtrSet.h"
33#include "llvm/ADT/SmallVector.h"
34#include "llvm/ADT/Statistic.h"
35#include "llvm/Analysis/AliasAnalysis.h"
36#include "llvm/Analysis/ConstantFolding.h"
37#include "llvm/Analysis/Dominators.h"
38#include "llvm/Analysis/MemoryBuiltins.h"
39#include "llvm/Analysis/MemoryDependenceAnalysis.h"
40#include "llvm/Analysis/PHITransAddr.h"
41#include "llvm/Support/CFG.h"
42#include "llvm/Support/CommandLine.h"
43#include "llvm/Support/Debug.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/GetElementPtrTypeIterator.h"
46#include "llvm/Support/IRBuilder.h"
47#include "llvm/Support/raw_ostream.h"
48#include "llvm/Target/TargetData.h"
49#include "llvm/Transforms/Utils/BasicBlockUtils.h"
50#include "llvm/Transforms/Utils/Local.h"
51#include "llvm/Transforms/Utils/SSAUpdater.h"
52using namespace llvm;
53
54STATISTIC(NumGVNInstr,  "Number of instructions deleted");
55STATISTIC(NumGVNLoad,   "Number of loads deleted");
56STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
57STATISTIC(NumGVNBlocks, "Number of blocks merged");
58STATISTIC(NumPRELoad,   "Number of loads PRE'd");
59
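// Command-line knobs: -enable-pre toggles scalar PRE (on by default, hidden)
// and -enable-load-pre toggles PRE of loads; both are consulted before any
// PRE transformation below is attempted.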
60static cl::opt<bool> EnablePRE("enable-pre",
61                               cl::init(true), cl::Hidden);
62static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
63
64//===----------------------------------------------------------------------===//
65//                         ValueTable Class
66//===----------------------------------------------------------------------===//
67
68/// This class holds the mapping between values and value numbers.  It is used
69/// as an efficient mechanism to determine the expression-wise equivalence of
70/// two values.
71namespace {
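  /// Expression - A flattened, operand-value-numbered description of an
  /// instruction (opcode, type, operand value numbers, and callee for calls).
  /// Two instructions that produce equal Expressions are considered
  /// equivalent and receive the same value number.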
72  struct Expression {
73    enum ExpressionOpcode {
74      ADD = Instruction::Add,
75      FADD = Instruction::FAdd,
76      SUB = Instruction::Sub,
77      FSUB = Instruction::FSub,
78      MUL = Instruction::Mul,
79      FMUL = Instruction::FMul,
80      UDIV = Instruction::UDiv,
81      SDIV = Instruction::SDiv,
82      FDIV = Instruction::FDiv,
83      UREM = Instruction::URem,
84      SREM = Instruction::SRem,
85      FREM = Instruction::FRem,
86      SHL = Instruction::Shl,
87      LSHR = Instruction::LShr,
88      ASHR = Instruction::AShr,
89      AND = Instruction::And,
90      OR = Instruction::Or,
91      XOR = Instruction::Xor,
92      TRUNC = Instruction::Trunc,
93      ZEXT = Instruction::ZExt,
94      SEXT = Instruction::SExt,
95      FPTOUI = Instruction::FPToUI,
96      FPTOSI = Instruction::FPToSI,
97      UITOFP = Instruction::UIToFP,
98      SITOFP = Instruction::SIToFP,
99      FPTRUNC = Instruction::FPTrunc,
100      FPEXT = Instruction::FPExt,
101      PTRTOINT = Instruction::PtrToInt,
102      INTTOPTR = Instruction::IntToPtr,
103      BITCAST = Instruction::BitCast,
104      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
105      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
106      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
107      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
108      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
109      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
110      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };
111
112    ExpressionOpcode opcode;
113    const Type* type;
114    SmallVector<uint32_t, 4> varargs;
115    Value *function;
116
117    Expression() { }
118    Expression(ExpressionOpcode o) : opcode(o) { }
119
120    bool operator==(const Expression &other) const {
121      if (opcode != other.opcode)
122        return false;
123      else if (opcode == EMPTY || opcode == TOMBSTONE)
124        return true;
125      else if (type != other.type)
126        return false;
127      else if (function != other.function)
128        return false;
129      else {
130        if (varargs.size() != other.varargs.size())
131          return false;
132
133        for (size_t i = 0; i < varargs.size(); ++i)
134          if (varargs[i] != other.varargs[i])
135            return false;
136
137        return true;
138      }
139    }
140
141    bool operator!=(const Expression &other) const {
142      return !(*this == other);
143    }
144  };
145
146  class ValueTable {
147    private:
148      DenseMap<Value*, uint32_t> valueNumbering;
149      DenseMap<Expression, uint32_t> expressionNumbering;
150      AliasAnalysis* AA;
151      MemoryDependenceAnalysis* MD;
152      DominatorTree* DT;
153
154      uint32_t nextValueNumber;
155
156      Expression::ExpressionOpcode getOpcode(CmpInst* C);
157      Expression create_expression(BinaryOperator* BO);
158      Expression create_expression(CmpInst* C);
159      Expression create_expression(ShuffleVectorInst* V);
160      Expression create_expression(ExtractElementInst* C);
161      Expression create_expression(InsertElementInst* V);
162      Expression create_expression(SelectInst* V);
163      Expression create_expression(CastInst* C);
164      Expression create_expression(GetElementPtrInst* G);
165      Expression create_expression(CallInst* C);
166      Expression create_expression(Constant* C);
167      Expression create_expression(ExtractValueInst* C);
168      Expression create_expression(InsertValueInst* C);
169
170      uint32_t lookup_or_add_call(CallInst* C);
171    public:
172      ValueTable() : nextValueNumber(1) { }
173      uint32_t lookup_or_add(Value *V);
174      uint32_t lookup(Value *V) const;
175      void add(Value *V, uint32_t num);
176      void clear();
177      void erase(Value *v);
178      unsigned size();
179      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
180      AliasAnalysis *getAliasAnalysis() const { return AA; }
181      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
182      void setDomTree(DominatorTree* D) { DT = D; }
183      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
184      void verifyRemoved(const Value *) const;
185  };
186}
187
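// DenseMapInfo specialization so that Expression can be used as a DenseMap
// key: it supplies the empty/tombstone sentinel keys, a hash over the
// expression's fields, and equality in terms of operator==.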
188namespace llvm {
189template <> struct DenseMapInfo<Expression> {
190  static inline Expression getEmptyKey() {
191    return Expression(Expression::EMPTY);
192  }
193
194  static inline Expression getTombstoneKey() {
195    return Expression(Expression::TOMBSTONE);
196  }
197
198  static unsigned getHashValue(const Expression e) {
199    unsigned hash = e.opcode;
200
201    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
202            (unsigned)((uintptr_t)e.type >> 9));
203
204    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
205         E = e.varargs.end(); I != E; ++I)
206      hash = *I + hash * 37;
207
208    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
209            (unsigned)((uintptr_t)e.function >> 9)) +
210           hash * 37;
211
212    return hash;
213  }
214  static bool isEqual(const Expression &LHS, const Expression &RHS) {
215    return LHS == RHS;
216  }
217};
218
219template <>
220struct isPodLike<Expression> { static const bool value = true; };
221
222}
223
224//===----------------------------------------------------------------------===//
225//                     ValueTable Internal Functions
226//===----------------------------------------------------------------------===//
227
228Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
229  if (isa<ICmpInst>(C)) {
230    switch (C->getPredicate()) {
231    default:  // THIS SHOULD NEVER HAPPEN
232      llvm_unreachable("Comparison with unknown predicate?");
233    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
234    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
235    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
236    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
237    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
238    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
239    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
240    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
241    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
242    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
243    }
244  } else {
245    switch (C->getPredicate()) {
246    default: // THIS SHOULD NEVER HAPPEN
247      llvm_unreachable("Comparison with unknown predicate?");
248    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
249    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
250    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
251    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
252    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
253    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
254    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
255    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
256    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
257    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
258    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
259    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
260    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
261    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
262    }
263  }
264}
265
266Expression ValueTable::create_expression(CallInst* C) {
267  Expression e;
268
269  e.type = C->getType();
270  e.function = C->getCalledFunction();
271  e.opcode = Expression::CALL;
272
273  for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
274       I != E; ++I)
275    e.varargs.push_back(lookup_or_add(*I));
276
277  return e;
278}
279
280Expression ValueTable::create_expression(BinaryOperator* BO) {
281  Expression e;
282  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
283  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
284  e.function = 0;
285  e.type = BO->getType();
286  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());
287
288  return e;
289}
290
291Expression ValueTable::create_expression(CmpInst* C) {
292  Expression e;
293
294  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
295  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
296  e.function = 0;
297  e.type = C->getType();
298  e.opcode = getOpcode(C);
299
300  return e;
301}
302
303Expression ValueTable::create_expression(CastInst* C) {
304  Expression e;
305
306  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
307  e.function = 0;
308  e.type = C->getType();
309  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());
310
311  return e;
312}
313
314Expression ValueTable::create_expression(ShuffleVectorInst* S) {
315  Expression e;
316
317  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
318  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
319  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
320  e.function = 0;
321  e.type = S->getType();
322  e.opcode = Expression::SHUFFLE;
323
324  return e;
325}
326
327Expression ValueTable::create_expression(ExtractElementInst* E) {
328  Expression e;
329
330  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
331  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
332  e.function = 0;
333  e.type = E->getType();
334  e.opcode = Expression::EXTRACT;
335
336  return e;
337}
338
339Expression ValueTable::create_expression(InsertElementInst* I) {
340  Expression e;
341
342  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
343  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
344  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
345  e.function = 0;
346  e.type = I->getType();
347  e.opcode = Expression::INSERT;
348
349  return e;
350}
351
352Expression ValueTable::create_expression(SelectInst* I) {
353  Expression e;
354
355  e.varargs.push_back(lookup_or_add(I->getCondition()));
356  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
357  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
358  e.function = 0;
359  e.type = I->getType();
360  e.opcode = Expression::SELECT;
361
362  return e;
363}
364
365Expression ValueTable::create_expression(GetElementPtrInst* G) {
366  Expression e;
367
368  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
369  e.function = 0;
370  e.type = G->getType();
371  e.opcode = Expression::GEP;
372
373  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
374       I != E; ++I)
375    e.varargs.push_back(lookup_or_add(*I));
376
377  return e;
378}
379
380Expression ValueTable::create_expression(ExtractValueInst* E) {
381  Expression e;
382
383  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
384  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
385       II != IE; ++II)
386    e.varargs.push_back(*II);
387  e.function = 0;
388  e.type = E->getType();
389  e.opcode = Expression::EXTRACTVALUE;
390
391  return e;
392}
393
394Expression ValueTable::create_expression(InsertValueInst* E) {
395  Expression e;
396
397  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
398  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
399  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
400       II != IE; ++II)
401    e.varargs.push_back(*II);
402  e.function = 0;
403  e.type = E->getType();
404  e.opcode = Expression::INSERTVALUE;
405
406  return e;
407}
408
409//===----------------------------------------------------------------------===//
410//                     ValueTable External Functions
411//===----------------------------------------------------------------------===//
412
413/// add - Insert a value into the table with a specified value number.
414void ValueTable::add(Value *V, uint32_t num) {
415  valueNumbering.insert(std::make_pair(V, num));
416}
417
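/// lookup_or_add_call - Value-number a call.  Calls that do not access memory
/// are numbered purely by their expression.  Calls that only read memory are
/// given the same number as an equivalent call they depend on (found via
/// MemoryDependenceAnalysis) when one exists; all other calls get a fresh
/// value number.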
418uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
419  if (AA->doesNotAccessMemory(C)) {
420    Expression exp = create_expression(C);
421    uint32_t& e = expressionNumbering[exp];
422    if (!e) e = nextValueNumber++;
423    valueNumbering[C] = e;
424    return e;
425  } else if (AA->onlyReadsMemory(C)) {
426    Expression exp = create_expression(C);
427    uint32_t& e = expressionNumbering[exp];
428    if (!e) {
429      e = nextValueNumber++;
430      valueNumbering[C] = e;
431      return e;
432    }
433    if (!MD) {
434      e = nextValueNumber++;
435      valueNumbering[C] = e;
436      return e;
437    }
438
439    MemDepResult local_dep = MD->getDependency(C);
440
441    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
442      valueNumbering[C] =  nextValueNumber;
443      return nextValueNumber++;
444    }
445
446    if (local_dep.isDef()) {
447      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());
448
449      if (local_cdep->getNumOperands() != C->getNumOperands()) {
450        valueNumbering[C] = nextValueNumber;
451        return nextValueNumber++;
452      }
453
454      for (unsigned i = 1; i < C->getNumOperands(); ++i) {
455        uint32_t c_vn = lookup_or_add(C->getOperand(i));
456        uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
457        if (c_vn != cd_vn) {
458          valueNumbering[C] = nextValueNumber;
459          return nextValueNumber++;
460        }
461      }
462
463      uint32_t v = lookup_or_add(local_cdep);
464      valueNumbering[C] = v;
465      return v;
466    }
467
468    // Non-local case.
469    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
470      MD->getNonLocalCallDependency(CallSite(C));
471    // FIXME: call/call dependencies for readonly calls should return def, not
472    // clobber!  Move the checking logic to MemDep!
473    CallInst* cdep = 0;
474
475    // Check to see if we have a single dominating call instruction that is
476    // identical to C.
477    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
478      const NonLocalDepEntry *I = &deps[i];
479      // Ignore non-local dependencies.
480      if (I->getResult().isNonLocal())
481        continue;
482
483      // We don't handle non-dependencies.  If we already have a call, reject
484      // instruction dependencies.
485      if (I->getResult().isClobber() || cdep != 0) {
486        cdep = 0;
487        break;
488      }
489
490      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
491      // FIXME: All of this is duplicated from the local case above.
492      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
493        cdep = NonLocalDepCall;
494        continue;
495      }
496
497      cdep = 0;
498      break;
499    }
500
501    if (!cdep) {
502      valueNumbering[C] = nextValueNumber;
503      return nextValueNumber++;
504    }
505
506    if (cdep->getNumOperands() != C->getNumOperands()) {
507      valueNumbering[C] = nextValueNumber;
508      return nextValueNumber++;
509    }
510    for (unsigned i = 1; i < C->getNumOperands(); ++i) {
511      uint32_t c_vn = lookup_or_add(C->getOperand(i));
512      uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
513      if (c_vn != cd_vn) {
514        valueNumbering[C] = nextValueNumber;
515        return nextValueNumber++;
516      }
517    }
518
519    uint32_t v = lookup_or_add(cdep);
520    valueNumbering[C] = v;
521    return v;
522
523  } else {
524    valueNumbering[C] = nextValueNumber;
525    return nextValueNumber++;
526  }
527}
528
529/// lookup_or_add - Returns the value number for the specified value, assigning
530/// it a new number if it did not have one before.
531uint32_t ValueTable::lookup_or_add(Value *V) {
532  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
533  if (VI != valueNumbering.end())
534    return VI->second;
535
536  if (!isa<Instruction>(V)) {
537    valueNumbering[V] = nextValueNumber;
538    return nextValueNumber++;
539  }
540
541  Instruction* I = cast<Instruction>(V);
542  Expression exp;
543  switch (I->getOpcode()) {
544    case Instruction::Call:
545      return lookup_or_add_call(cast<CallInst>(I));
546    case Instruction::Add:
547    case Instruction::FAdd:
548    case Instruction::Sub:
549    case Instruction::FSub:
550    case Instruction::Mul:
551    case Instruction::FMul:
552    case Instruction::UDiv:
553    case Instruction::SDiv:
554    case Instruction::FDiv:
555    case Instruction::URem:
556    case Instruction::SRem:
557    case Instruction::FRem:
558    case Instruction::Shl:
559    case Instruction::LShr:
560    case Instruction::AShr:
561    case Instruction::And:
562    case Instruction::Or :
563    case Instruction::Xor:
564      exp = create_expression(cast<BinaryOperator>(I));
565      break;
566    case Instruction::ICmp:
567    case Instruction::FCmp:
568      exp = create_expression(cast<CmpInst>(I));
569      break;
570    case Instruction::Trunc:
571    case Instruction::ZExt:
572    case Instruction::SExt:
573    case Instruction::FPToUI:
574    case Instruction::FPToSI:
575    case Instruction::UIToFP:
576    case Instruction::SIToFP:
577    case Instruction::FPTrunc:
578    case Instruction::FPExt:
579    case Instruction::PtrToInt:
580    case Instruction::IntToPtr:
581    case Instruction::BitCast:
582      exp = create_expression(cast<CastInst>(I));
583      break;
584    case Instruction::Select:
585      exp = create_expression(cast<SelectInst>(I));
586      break;
587    case Instruction::ExtractElement:
588      exp = create_expression(cast<ExtractElementInst>(I));
589      break;
590    case Instruction::InsertElement:
591      exp = create_expression(cast<InsertElementInst>(I));
592      break;
593    case Instruction::ShuffleVector:
594      exp = create_expression(cast<ShuffleVectorInst>(I));
595      break;
596    case Instruction::ExtractValue:
597      exp = create_expression(cast<ExtractValueInst>(I));
598      break;
599    case Instruction::InsertValue:
600      exp = create_expression(cast<InsertValueInst>(I));
601      break;
602    case Instruction::GetElementPtr:
603      exp = create_expression(cast<GetElementPtrInst>(I));
604      break;
605    default:
606      valueNumbering[V] = nextValueNumber;
607      return nextValueNumber++;
608  }
609
610  uint32_t& e = expressionNumbering[exp];
611  if (!e) e = nextValueNumber++;
612  valueNumbering[V] = e;
613  return e;
614}
615
616/// lookup - Returns the value number of the specified value. Fails if
617/// the value has not yet been numbered.
618uint32_t ValueTable::lookup(Value *V) const {
619  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
620  assert(VI != valueNumbering.end() && "Value not numbered?");
621  return VI->second;
622}
623
624/// clear - Remove all entries from the ValueTable
625void ValueTable::clear() {
626  valueNumbering.clear();
627  expressionNumbering.clear();
628  nextValueNumber = 1;
629}
630
631/// erase - Remove a value from the value numbering
632void ValueTable::erase(Value *V) {
633  valueNumbering.erase(V);
634}
635
636/// verifyRemoved - Verify that the value is removed from all internal data
637/// structures.
638void ValueTable::verifyRemoved(const Value *V) const {
639  for (DenseMap<Value*, uint32_t>::const_iterator
640         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
641    assert(I->first != V && "Inst still occurs in value numbering map!");
642  }
643}
644
645//===----------------------------------------------------------------------===//
646//                                GVN Pass
647//===----------------------------------------------------------------------===//
648
649namespace {
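  /// ValueNumberScope - Per-basic-block table mapping value numbers to the
  /// Value that is available (the "leader") in that block.  Scopes are chained
  /// through 'parent' so a lookup can fall back to an enclosing (dominating)
  /// scope; GVN keeps one of these per block in the localAvail map.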
650  struct ValueNumberScope {
651    ValueNumberScope* parent;
652    DenseMap<uint32_t, Value*> table;
653
654    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
655  };
656}
657
658namespace {
659
660  class GVN : public FunctionPass {
661    bool runOnFunction(Function &F);
662  public:
663    static char ID; // Pass identification, replacement for typeid
664    explicit GVN(bool nopre = false, bool noloads = false)
665      : FunctionPass(&ID), NoPRE(nopre), NoLoads(noloads), MD(0) { }
666
667  private:
668    bool NoPRE;
669    bool NoLoads;
670    MemoryDependenceAnalysis *MD;
671    DominatorTree *DT;
672
673    ValueTable VN;
674    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;
675
676    // This transformation requires dominator info
677    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
678      AU.addRequired<DominatorTree>();
679      if (!NoLoads)
680        AU.addRequired<MemoryDependenceAnalysis>();
681      AU.addRequired<AliasAnalysis>();
682
683      AU.addPreserved<DominatorTree>();
684      AU.addPreserved<AliasAnalysis>();
685    }
686
687    // Helper functions
688    // FIXME: eliminate or document these better
689    bool processLoad(LoadInst* L,
690                     SmallVectorImpl<Instruction*> &toErase);
691    bool processInstruction(Instruction *I,
692                            SmallVectorImpl<Instruction*> &toErase);
693    bool processNonLocalLoad(LoadInst* L,
694                             SmallVectorImpl<Instruction*> &toErase);
695    bool processBlock(BasicBlock *BB);
696    void dump(DenseMap<uint32_t, Value*>& d);
697    bool iterateOnFunction(Function &F);
698    Value *CollapsePhi(PHINode* p);
699    bool performPRE(Function& F);
700    Value *lookupNumber(BasicBlock *BB, uint32_t num);
701    void cleanupGlobalSets();
702    void verifyRemoved(const Instruction *I) const;
703  };
704
705  char GVN::ID = 0;
706}
707
708// createGVNPass - The public interface to this file...
709FunctionPass *llvm::createGVNPass(bool NoPRE, bool NoLoads) {
710  return new GVN(NoPRE, NoLoads);
711}
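// Typical usage (illustrative): a client adds the pass to a pass manager,
// e.g. "PM.add(createGVNPass());", optionally disabling PRE or load handling
// via the boolean parameters.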
712
713static RegisterPass<GVN> X("gvn",
714                           "Global Value Numbering");
715
716void GVN::dump(DenseMap<uint32_t, Value*>& d) {
717  errs() << "{\n";
718  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
719       E = d.end(); I != E; ++I) {
720      errs() << I->first << "\n";
721      I->second->dump();
722  }
723  errs() << "}\n";
724}
725
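/// isSafeReplacement - Return true if it is safe to replace the PHI node p
/// with inst.  Replacement is rejected only when inst is itself a PHI and
/// some user of p is a PHI in the same block as inst.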
726static bool isSafeReplacement(PHINode* p, Instruction *inst) {
727  if (!isa<PHINode>(inst))
728    return true;
729
730  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
731       UI != E; ++UI)
732    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
733      if (use_phi->getParent() == inst->getParent())
734        return false;
735
736  return true;
737}
738
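/// CollapsePhi - If PN computes the same value on every incoming edge, return
/// that value, provided it dominates PN and replacing PN with it is safe;
/// otherwise return null.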
739Value *GVN::CollapsePhi(PHINode *PN) {
740  Value *ConstVal = PN->hasConstantValue(DT);
741  if (!ConstVal) return 0;
742
743  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
744  if (!Inst)
745    return ConstVal;
746
747  if (DT->dominates(Inst, PN))
748    if (isSafeReplacement(PN, Inst))
749      return Inst;
750  return 0;
751}
752
753/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
754/// we're analyzing is fully available in the specified block.  As we go, keep
755/// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
756/// map is actually a tri-state map with the following values:
757///   0) we know the block *is not* fully available.
758///   1) we know the block *is* fully available.
759///   2) we do not know whether the block is fully available or not, but we are
760///      currently speculating that it will be.
761///   3) we are speculating for this block and have used that to speculate for
762///      other blocks.
763static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
764                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
765  // Optimistically assume that the block is fully available and check to see
766  // if we already know about this block in one lookup.
767  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
768    FullyAvailableBlocks.insert(std::make_pair(BB, 2));
769
770  // If the entry already existed for this block, return the precomputed value.
771  if (!IV.second) {
772    // If this is a speculative "available" value, mark it as being used for
773    // speculation of other blocks.
774    if (IV.first->second == 2)
775      IV.first->second = 3;
776    return IV.first->second != 0;
777  }
778
779  // Otherwise, see if it is fully available in all predecessors.
780  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
781
782  // If this block has no predecessors, the value isn't live-in here.
783  if (PI == PE)
784    goto SpeculationFailure;
785
786  for (; PI != PE; ++PI)
787    // If the value isn't fully available in one of our predecessors, then it
788    // isn't fully available in this block either.  Undo our previous
789    // optimistic assumption and bail out.
790    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
791      goto SpeculationFailure;
792
793  return true;
794
795// SpeculationFailure - If we get here, we found out that this is not, after
796// all, a fully-available block.  We have a problem if we speculated on this and
797// used the speculation to mark other blocks as available.
798SpeculationFailure:
799  char &BBVal = FullyAvailableBlocks[BB];
800
801  // If we didn't speculate on this, just return with it set to false.
802  if (BBVal == 2) {
803    BBVal = 0;
804    return false;
805  }
806
807  // If we did speculate on this value, we could have blocks set to 1 that are
808  // incorrect.  Walk the (transitive) successors of this block and mark them as
809  // 0 if set to one.
810  SmallVector<BasicBlock*, 32> BBWorklist;
811  BBWorklist.push_back(BB);
812
813  do {
814    BasicBlock *Entry = BBWorklist.pop_back_val();
815    // Note that this sets blocks to 0 (unavailable) if they happen to not
816    // already be in FullyAvailableBlocks.  This is safe.
817    char &EntryVal = FullyAvailableBlocks[Entry];
818    if (EntryVal == 0) continue;  // Already unavailable.
819
820    // Mark as unavailable.
821    EntryVal = 0;
822
823    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
824      BBWorklist.push_back(*I);
825  } while (!BBWorklist.empty());
826
827  return false;
828}
829
830
831/// CanCoerceMustAliasedValueToLoad - Return true if
832/// CoerceAvailableValueToLoadType will succeed.
833static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
834                                            const Type *LoadTy,
835                                            const TargetData &TD) {
836  // If the loaded or stored value is a first class array or struct, don't try
837  // to transform them.  We need to be able to bitcast to integer.
838  if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy) ||
839      isa<StructType>(StoredVal->getType()) ||
840      isa<ArrayType>(StoredVal->getType()))
841    return false;
842
843  // The store has to be at least as big as the load.
844  if (TD.getTypeSizeInBits(StoredVal->getType()) <
845        TD.getTypeSizeInBits(LoadTy))
846    return false;
847
848  return true;
849}
850
851
852/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
853/// then a load from a must-aliased pointer of a different type, try to coerce
854/// the stored value.  LoadedTy is the type of the load we want to replace and
855/// InsertPt is the place to insert new instructions.
856///
857/// If we can't do it, return null.
858static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
859                                             const Type *LoadedTy,
860                                             Instruction *InsertPt,
861                                             const TargetData &TD) {
862  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
863    return 0;
864
865  const Type *StoredValTy = StoredVal->getType();
866
867  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
868  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);
869
870  // If the store and reload are the same size, we can always reuse it.
871  if (StoreSize == LoadSize) {
872    if (isa<PointerType>(StoredValTy) && isa<PointerType>(LoadedTy)) {
873      // Pointer to Pointer -> use bitcast.
874      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
875    }
876
877    // Convert source pointers to integers, which can be bitcast.
878    if (isa<PointerType>(StoredValTy)) {
879      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
880      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
881    }
882
883    const Type *TypeToCastTo = LoadedTy;
884    if (isa<PointerType>(TypeToCastTo))
885      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());
886
887    if (StoredValTy != TypeToCastTo)
888      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
889
890    // Cast to pointer if the load needs a pointer type.
891    if (isa<PointerType>(LoadedTy))
892      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);
893
894    return StoredVal;
895  }
896
897  // If the loaded value is smaller than the available value, then we can
898  // extract out a piece from it.  If the available value is too small, then we
899  // can't do anything.
900  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");
901
902  // Convert source pointers to integers, which can be manipulated.
903  if (isa<PointerType>(StoredValTy)) {
904    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
905    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
906  }
907
908  // Convert vectors and fp to integer, which can be manipulated.
909  if (!isa<IntegerType>(StoredValTy)) {
910    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
911    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
912  }
913
914  // If this is a big-endian system, we need to shift the value down to the low
915  // bits so that a truncate will work.
916  if (TD.isBigEndian()) {
917    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
918    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
919  }
920
921  // Truncate the integer to the right size now.
922  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
923  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);
924
925  if (LoadedTy == NewIntTy)
926    return StoredVal;
927
928  // If the result is a pointer, inttoptr.
929  if (isa<PointerType>(LoadedTy))
930    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);
931
932  // Otherwise, bitcast.
933  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
934}
935
936/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
937/// be expressed as a base pointer plus a constant offset.  Return the base and
938/// offset to the caller.
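/// For example (illustrative), for "getelementptr i8* %p, i64 12" this returns
/// %p and adds 12 to Offset; bitcasts are looked through and struct field
/// indices are resolved via the StructLayout.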
939static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
940                                        const TargetData &TD) {
941  Operator *PtrOp = dyn_cast<Operator>(Ptr);
942  if (PtrOp == 0) return Ptr;
943
944  // Just look through bitcasts.
945  if (PtrOp->getOpcode() == Instruction::BitCast)
946    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);
947
948  // If this is a GEP with constant indices, we can look through it.
949  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
950  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;
951
952  gep_type_iterator GTI = gep_type_begin(GEP);
953  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
954       ++I, ++GTI) {
955    ConstantInt *OpC = cast<ConstantInt>(*I);
956    if (OpC->isZero()) continue;
957
958    // Handle struct and array indices, which add their offset to the pointer.
959    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
960      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
961    } else {
962      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
963      Offset += OpC->getSExtValue()*Size;
964    }
965  }
966
967  // Re-sign extend from the pointer size if needed to get overflow edge cases
968  // right.
969  unsigned PtrSize = TD.getPointerSizeInBits();
970  if (PtrSize < 64)
971    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);
972
973  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
974}
975
976
977/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
978/// memdep query of a load that ends up being a clobbering memory write (store,
979/// memset, memcpy, memmove).  This means that the write *may* provide bits used
980/// by the load but we can't be sure because the pointers don't mustalias.
981///
982/// Check this case to see if there is anything more we can do before we give
983/// up.  This returns -1 if we have to give up, or a byte number in the stored
984/// value of the piece that feeds the load.
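/// For example (illustrative): an i32 load at base+4 that is clobbered by an
/// i64 store to base+0 is fully covered by the store, so this returns 4; if
/// the two ranges do not overlap or the load is not fully contained, it
/// returns -1.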
985static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
986                                          Value *WritePtr,
987                                          uint64_t WriteSizeInBits,
988                                          const TargetData &TD) {
989  // If the loaded or stored value is a first class array or struct, don't try
990  // to transform them.  We need to be able to bitcast to integer.
991  if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy))
992    return -1;
993
994  int64_t StoreOffset = 0, LoadOffset = 0;
995  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
996  Value *LoadBase =
997    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
998  if (StoreBase != LoadBase)
999    return -1;
1000
1001  // If the load and store are to the exact same address, they should have been
1002  // a must alias.  AA must have gotten confused.
1003  // FIXME: Study to see if/when this happens.
1004  if (LoadOffset == StoreOffset) {
1005#if 0
1006    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
1007    << "Base       = " << *StoreBase << "\n"
1008    << "Store Ptr  = " << *WritePtr << "\n"
1009    << "Store Offs = " << StoreOffset << "\n"
1010    << "Load Ptr   = " << *LoadPtr << "\n";
1011    abort();
1012#endif
1013    return -1;
1014  }
1015
1016  // If the load and store don't overlap at all, the store doesn't provide
1017  // anything to the load.  In this case, they really don't alias at all, AA
1018  // must have gotten confused.
1019  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
1020  // remove this check, as it is duplicated with what we have below.
1021  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);
1022
1023  if ((WriteSizeInBits & 7) | (LoadSize & 7))
1024    return -1;
1025  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
1026  LoadSize >>= 3;
1027
1028
1029  bool isAAFailure = false;
1030  if (StoreOffset < LoadOffset) {
1031    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
1032  } else {
1033    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
1034  }
1035  if (isAAFailure) {
1036#if 0
1037    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
1038    << "Base       = " << *StoreBase << "\n"
1039    << "Store Ptr  = " << *WritePtr << "\n"
1040    << "Store Offs = " << StoreOffset << "\n"
1041    << "Load Ptr   = " << *LoadPtr << "\n";
1042    abort();
1043#endif
1044    return -1;
1045  }
1046
1047  // If the Load isn't completely contained within the stored bits, we don't
1048  // have all the bits to feed it.  We could do something crazy in the future
1049  // (issue a smaller load then merge the bits in) but this seems unlikely to be
1050  // valuable.
1051  if (StoreOffset > LoadOffset ||
1052      StoreOffset+StoreSize < LoadOffset+LoadSize)
1053    return -1;
1054
1055  // Okay, we can do this transformation.  Return the number of bytes into the
1056  // store that the load is.
1057  return LoadOffset-StoreOffset;
1058}
1059
1060/// AnalyzeLoadFromClobberingStore - This function is called when we have a
1061/// memdep query of a load that ends up being a clobbering store.
1062static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
1063                                          StoreInst *DepSI,
1064                                          const TargetData &TD) {
1065  // Cannot handle reading from store of first-class aggregate yet.
1066  if (isa<StructType>(DepSI->getOperand(0)->getType()) ||
1067      isa<ArrayType>(DepSI->getOperand(0)->getType()))
1068    return -1;
1069
1070  Value *StorePtr = DepSI->getPointerOperand();
1071  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
1072  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
1073                                        StorePtr, StoreSize, TD);
1074}
1075
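/// AnalyzeLoadFromClobberingMemInst - This function is called when we have a
/// memdep query of a load that ends up being clobbered by a mem intrinsic
/// (memset, or a memcpy/memmove from constant memory).  Returns the byte
/// offset of the load within the written range, or -1 if nothing can be done.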
1076static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
1077                                            MemIntrinsic *MI,
1078                                            const TargetData &TD) {
1079  // If the mem operation is a non-constant size, we can't handle it.
1080  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
1081  if (SizeCst == 0) return -1;
1082  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;
1083
1084  // If this is memset, we just need to see if the offset is valid in the size
1085  // of the memset.
1086  if (MI->getIntrinsicID() == Intrinsic::memset)
1087    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
1088                                          MemSizeInBits, TD);
1089
1090  // If we have a memcpy/memmove, the only case we can handle is if this is a
1091  // copy from constant memory.  In that case, we can read directly from the
1092  // constant memory.
1093  MemTransferInst *MTI = cast<MemTransferInst>(MI);
1094
1095  Constant *Src = dyn_cast<Constant>(MTI->getSource());
1096  if (Src == 0) return -1;
1097
1098  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
1099  if (GV == 0 || !GV->isConstant()) return -1;
1100
1101  // See if the access is within the bounds of the transfer.
1102  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
1103                                              MI->getDest(), MemSizeInBits, TD);
1104  if (Offset == -1)
1105    return Offset;
1106
1107  // Otherwise, see if we can constant fold a load from the constant with the
1108  // offset applied as appropriate.
1109  Src = ConstantExpr::getBitCast(Src,
1110                                 llvm::Type::getInt8PtrTy(Src->getContext()));
1111  Constant *OffsetCst =
1112    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
1113  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
1114  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
1115  if (ConstantFoldLoadFromConstPtr(Src, &TD))
1116    return Offset;
1117  return -1;
1118}
1119
1120
1121/// GetStoreValueForLoad - This function is called when we have a
1122/// memdep query of a load that ends up being a clobbering store.  This means
1123/// that the store *may* provide bits used by the load but we can't be sure
1124/// because the pointers don't mustalias.  Check this case to see if there is
1125/// anything more we can do before we give up.
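/// For example (illustrative), reusing an i8 load at byte offset 2 of an i32
/// store on a little-endian target emits "lshr" by 16 bits followed by a
/// "trunc" to i8.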
1126static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
1127                                   const Type *LoadTy,
1128                                   Instruction *InsertPt, const TargetData &TD){
1129  LLVMContext &Ctx = SrcVal->getType()->getContext();
1130
1131  uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8;
1132  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
1133
1134  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
1135
1136  // Compute which bits of the stored value are being used by the load.  Convert
1137  // to an integer type to start with.
1138  if (isa<PointerType>(SrcVal->getType()))
1139    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
1140  if (!isa<IntegerType>(SrcVal->getType()))
1141    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
1142                                   "tmp");
1143
1144  // Shift the bits to the least significant depending on endianness.
1145  unsigned ShiftAmt;
1146  if (TD.isLittleEndian())
1147    ShiftAmt = Offset*8;
1148  else
1149    ShiftAmt = (StoreSize-LoadSize-Offset)*8;
1150
1151  if (ShiftAmt)
1152    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");
1153
1154  if (LoadSize != StoreSize)
1155    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
1156                                 "tmp");
1157
1158  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
1159}
1160
1161/// GetMemInstValueForLoad - This function is called when we have a
1162/// memdep query of a load that ends up being a clobbering mem intrinsic.
1163static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
1164                                     const Type *LoadTy, Instruction *InsertPt,
1165                                     const TargetData &TD){
1166  LLVMContext &Ctx = LoadTy->getContext();
1167  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
1168
1169  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
1170
1171  // We know that this method is only called when the mem transfer fully
1172  // provides the bits for the load.
1173  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
1174    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
1175    // independently of what the offset is.
1176    Value *Val = MSI->getValue();
1177    if (LoadSize != 1)
1178      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));
1179
1180    Value *OneElt = Val;
1181
1182    // Splat the value out to the right number of bits.
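    // The loop below doubles the number of set bytes each step (byte ->
    // 2 bytes -> 4 bytes ...) with shift+or, and appends one byte at a time
    // once doubling would exceed LoadSize.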
1183    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
1184      // If we can double the number of bytes set, do it.
1185      if (NumBytesSet*2 <= LoadSize) {
1186        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
1187        Val = Builder.CreateOr(Val, ShVal);
1188        NumBytesSet <<= 1;
1189        continue;
1190      }
1191
1192      // Otherwise insert one byte at a time.
1193      Value *ShVal = Builder.CreateShl(Val, 1*8);
1194      Val = Builder.CreateOr(OneElt, ShVal);
1195      ++NumBytesSet;
1196    }
1197
1198    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
1199  }
1200
1201  // Otherwise, this is a memcpy/memmove from a constant global.
1202  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
1203  Constant *Src = cast<Constant>(MTI->getSource());
1204
1205  // See if we can constant fold a load from the constant with the
1206  // offset applied as appropriate.
1207  Src = ConstantExpr::getBitCast(Src,
1208                                 llvm::Type::getInt8PtrTy(Src->getContext()));
1209  Constant *OffsetCst =
1210  ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
1211  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
1212  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
1213  return ConstantFoldLoadFromConstPtr(Src, &TD);
1214}
1215
1216
1217
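/// AvailableValueInBlock - Represents a value known to be available at the end
/// of a block that can feed the load being eliminated: either a plain Value
/// (SimpleVal) or a memory intrinsic to read from (MemIntrin), together with
/// the byte offset into it that the load uses.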
1218struct AvailableValueInBlock {
1219  /// BB - The basic block in question.
1220  BasicBlock *BB;
1221  enum ValType {
1222    SimpleVal,  // A simple offsetted value that is accessed.
1223    MemIntrin   // A memory intrinsic which is loaded from.
1224  };
1225
1226  /// V - The value that is live out of the block.
1227  PointerIntPair<Value *, 1, ValType> Val;
1228
1229  /// Offset - The byte offset in Val that is interesting for the load query.
1230  unsigned Offset;
1231
1232  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
1233                                   unsigned Offset = 0) {
1234    AvailableValueInBlock Res;
1235    Res.BB = BB;
1236    Res.Val.setPointer(V);
1237    Res.Val.setInt(SimpleVal);
1238    Res.Offset = Offset;
1239    return Res;
1240  }
1241
1242  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
1243                                     unsigned Offset = 0) {
1244    AvailableValueInBlock Res;
1245    Res.BB = BB;
1246    Res.Val.setPointer(MI);
1247    Res.Val.setInt(MemIntrin);
1248    Res.Offset = Offset;
1249    return Res;
1250  }
1251
1252  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
1253  Value *getSimpleValue() const {
1254    assert(isSimpleValue() && "Wrong accessor");
1255    return Val.getPointer();
1256  }
1257
1258  MemIntrinsic *getMemIntrinValue() const {
1259    assert(!isSimpleValue() && "Wrong accessor");
1260    return cast<MemIntrinsic>(Val.getPointer());
1261  }
1262
1263  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
1264  /// defined here to the specified type.  This handles various coercion cases.
1265  Value *MaterializeAdjustedValue(const Type *LoadTy,
1266                                  const TargetData *TD) const {
1267    Value *Res;
1268    if (isSimpleValue()) {
1269      Res = getSimpleValue();
1270      if (Res->getType() != LoadTy) {
1271        assert(TD && "Need target data to handle type mismatch case");
1272        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
1273                                   *TD);
1274
1275        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
1276                     << *getSimpleValue() << '\n'
1277                     << *Res << '\n' << "\n\n\n");
1278      }
1279    } else {
1280      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
1281                                   LoadTy, BB->getTerminator(), *TD);
1282      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
1283                   << "  " << *getMemIntrinValue() << '\n'
1284                   << *Res << '\n' << "\n\n\n");
1285    }
1286    return Res;
1287  }
1288};
1289
1290/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
1291/// construct SSA form, allowing us to eliminate LI.  This returns the value
1292/// that should be used at LI's definition site.
1293static Value *ConstructSSAForLoadSet(LoadInst *LI,
1294                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
1295                                     const TargetData *TD,
1296                                     const DominatorTree &DT,
1297                                     AliasAnalysis *AA) {
1298  // Check for the fully redundant, dominating load case.  In this case, we can
1299  // just use the dominating value directly.
1300  if (ValuesPerBlock.size() == 1 &&
1301      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
1302    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);
1303
1304  // Otherwise, we have to construct SSA form.
1305  SmallVector<PHINode*, 8> NewPHIs;
1306  SSAUpdater SSAUpdate(&NewPHIs);
1307  SSAUpdate.Initialize(LI);
1308
1309  const Type *LoadTy = LI->getType();
1310
1311  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
1312    const AvailableValueInBlock &AV = ValuesPerBlock[i];
1313    BasicBlock *BB = AV.BB;
1314
1315    if (SSAUpdate.HasValueForBlock(BB))
1316      continue;
1317
1318    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
1319  }
1320
1321  // Perform PHI construction.
1322  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
1323
1324  // If new PHI nodes were created, notify alias analysis.
1325  if (isa<PointerType>(V->getType()))
1326    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
1327      AA->copyValue(LI, NewPHIs[i]);
1328
1329  return V;
1330}
1331
1332static bool isLifetimeStart(Instruction *Inst) {
1333  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
1334    return II->getIntrinsicID() == Intrinsic::lifetime_start;
1335  return false;
1336}
1337
1338/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
1339/// non-local by performing PHI construction.
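/// The overall flow: query memdep for the non-local dependencies of the load,
/// classify each dependent block as having an available value or not, then
/// either build SSA form over the available values (full redundancy) or, when
/// only some blocks are available, consider PRE by inserting a single load in
/// the missing predecessor.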
1340bool GVN::processNonLocalLoad(LoadInst *LI,
1341                              SmallVectorImpl<Instruction*> &toErase) {
1342  // Find the non-local dependencies of the load.
1343  SmallVector<NonLocalDepResult, 64> Deps;
1344  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
1345                                   Deps);
1346  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
1347  //             << Deps.size() << *LI << '\n');
1348
1349  // If we had to process more than one hundred blocks to find the
1350  // dependencies, this load isn't worth worrying about.  Optimizing
1351  // it will be too expensive.
1352  if (Deps.size() > 100)
1353    return false;
1354
1355  // If we had a phi translation failure, we'll have a single entry which is a
1356  // clobber in the current block.  Reject this early.
1357  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
1358    DEBUG(
1359      dbgs() << "GVN: non-local load ";
1360      WriteAsOperand(dbgs(), LI);
1361      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
1362    );
1363    return false;
1364  }
1365
1366  // Filter out useless results (non-locals, etc).  Keep track of the blocks
1367  // where we have a value available in ValuesPerBlock, and also whether we see
1368  // dependencies that produce an unknown value for the load (such as a call
1369  // that could potentially clobber the load).
1370  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
1371  SmallVector<BasicBlock*, 16> UnavailableBlocks;
1372
1373  const TargetData *TD = 0;
1374
1375  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
1376    BasicBlock *DepBB = Deps[i].getBB();
1377    MemDepResult DepInfo = Deps[i].getResult();
1378
1379    if (DepInfo.isClobber()) {
1380      // The address being loaded in this non-local block may not be the same as
1381      // the pointer operand of the load if PHI translation occurs.  Make sure
1382      // to consider the right address.
1383      Value *Address = Deps[i].getAddress();
1384
1385      // If the dependence is to a store that writes to a superset of the bits
1386      // read by the load, we can extract the bits we need for the load from the
1387      // stored value.
1388      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
1389        if (TD == 0)
1390          TD = getAnalysisIfAvailable<TargetData>();
1391        if (TD && Address) {
1392          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
1393                                                      DepSI, *TD);
1394          if (Offset != -1) {
1395            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1396                                                           DepSI->getOperand(0),
1397                                                                Offset));
1398            continue;
1399          }
1400        }
1401      }
1402
1403      // If the clobbering value is a memset/memcpy/memmove, see if we can
1404      // forward a value on from it.
1405      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
1406        if (TD == 0)
1407          TD = getAnalysisIfAvailable<TargetData>();
1408        if (TD && Address) {
1409          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
1410                                                        DepMI, *TD);
1411          if (Offset != -1) {
1412            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
1413                                                                  Offset));
1414            continue;
1415          }
1416        }
1417      }
1418
1419      UnavailableBlocks.push_back(DepBB);
1420      continue;
1421    }
1422
1423    Instruction *DepInst = DepInfo.getInst();
1424
1425    // Loading the allocation -> undef.
1426    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
1427        // Loading immediately after lifetime begin -> undef.
1428        isLifetimeStart(DepInst)) {
1429      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1430                                             UndefValue::get(LI->getType())));
1431      continue;
1432    }
1433
1434    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
1435      // Reject loads and stores that are to the same address but are of
1436      // different types if we have to.
1437      if (S->getOperand(0)->getType() != LI->getType()) {
1438        if (TD == 0)
1439          TD = getAnalysisIfAvailable<TargetData>();
1440
1441        // If the stored value is larger or equal to the loaded value, we can
1442        // reuse it.
1443        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
1444                                                        LI->getType(), *TD)) {
1445          UnavailableBlocks.push_back(DepBB);
1446          continue;
1447        }
1448      }
1449
1450      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1451                                                          S->getOperand(0)));
1452      continue;
1453    }
1454
1455    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
1456      // If the types mismatch and we can't handle it, reject reuse of the load.
1457      if (LD->getType() != LI->getType()) {
1458        if (TD == 0)
1459          TD = getAnalysisIfAvailable<TargetData>();
1460
1461        // If the stored value is larger or equal to the loaded value, we can
1462        // reuse it.
1463        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
1464          UnavailableBlocks.push_back(DepBB);
1465          continue;
1466        }
1467      }
1468      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
1469      continue;
1470    }
1471
1472    UnavailableBlocks.push_back(DepBB);
1473    continue;
1474  }
1475
1476  // If we have no predecessors that produce a known value for this load, exit
1477  // early.
1478  if (ValuesPerBlock.empty()) return false;
1479
1480  // If all of the instructions we depend on produce a known value for this
1481  // load, then it is fully redundant and we can use PHI insertion to compute
1482  // its value.  Insert PHIs and remove the fully redundant value now.
1483  if (UnavailableBlocks.empty()) {
1484    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');
1485
1486    // Perform PHI construction.
1487    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
1488                                      VN.getAliasAnalysis());
1489    LI->replaceAllUsesWith(V);
1490
1491    if (isa<PHINode>(V))
1492      V->takeName(LI);
1493    if (isa<PointerType>(V->getType()))
1494      MD->invalidateCachedPointerInfo(V);
1495    toErase.push_back(LI);
1496    NumGVNLoad++;
1497    return true;
1498  }
1499
1500  if (!EnablePRE || !EnableLoadPRE)
1501    return false;
1502
1503  // Okay, we have *some* definitions of the value.  This means that the value
1504  // is available in some of our (transitive) predecessors.  Let's think about
1505  // doing PRE of this load.  This will involve inserting a new load into the
1506  // predecessor when it's not available.  We could do this in general, but
1507  // prefer to not increase code size.  As such, we only do this when we know
1508  // that we only have to insert *one* load (which means we're basically moving
1509  // the load, not inserting a new one).
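  //
  // For illustration (a hypothetical CFG, not taken from this function):
  //
  //      BB1: %v = load i32* %P        BB2: (no available load of %P)
  //              \                       /
  //               \                     /
  //              BB3: %x = load i32* %P
  //
  // The load in BB3 is available along the BB1 edge but not the BB2 edge, so
  // we can insert a single load at the end of BB2 and merge the two available
  // values with a PHI in BB3.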
1510
1511  SmallPtrSet<BasicBlock *, 4> Blockers;
1512  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
1513    Blockers.insert(UnavailableBlocks[i]);
1514
1515  // Let's find the first basic block with more than one predecessor, walking
1516  // backwards through single-predecessor blocks if needed.
1517  BasicBlock *LoadBB = LI->getParent();
1518  BasicBlock *TmpBB = LoadBB;
1519
1520  bool isSinglePred = false;
1521  bool allSingleSucc = true;
1522  while (TmpBB->getSinglePredecessor()) {
1523    isSinglePred = true;
1524    TmpBB = TmpBB->getSinglePredecessor();
1525    if (!TmpBB) // If there is no single predecessor, bail now.
1526      return false;
1527    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
1528      return false;
1529    if (Blockers.count(TmpBB))
1530      return false;
1531    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
1532      allSingleSucc = false;
1533  }
1534
1535  assert(TmpBB);
1536  LoadBB = TmpBB;
1537
1538  // If the set of available values contains LI itself, we have a loop where at
1539  // least one of the incoming values is LI.  In that case we could not
1540  // eliminate LI even after inserting loads in the other predecessors, so PRE
1541  // would only increase code size.  Reject this by scanning for LI.
1542  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
1543    if (ValuesPerBlock[i].isSimpleValue() &&
1544        ValuesPerBlock[i].getSimpleValue() == LI)
1545      return false;
1546
1547  // FIXME: It is extremely unclear what this loop is doing, other than
1548  // artificially restricting loadpre.
1549  if (isSinglePred) {
1550    bool isHot = false;
1551    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
1552      const AvailableValueInBlock &AV = ValuesPerBlock[i];
1553      if (AV.isSimpleValue())
1554        // "Hot" Instruction is in some loop (because it dominates its dep.
1555        // instruction).
1556        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
1557          if (DT->dominates(LI, I)) {
1558            isHot = true;
1559            break;
1560          }
1561    }
1562
1563    // We are only interested in "hot" loads here; PREing anything else is more
1564    // likely to pessimize the code than to help it.
1565    if (!isHot)
1566      return false;
1567  }
1568
1569  // Okay, we have some hope :).  Check to see if the loaded value is fully
1570  // available in all but one predecessor.
1571  // FIXME: If we could restructure the CFG, we could make a common pred with
1572  // all the preds that don't have an available LI and insert a new load into
1573  // that one block.
1574  BasicBlock *UnavailablePred = 0;
1575
1576  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
1577  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
1578    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
1579  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
1580    FullyAvailableBlocks[UnavailableBlocks[i]] = false;
1581
1582  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
1583       PI != E; ++PI) {
1584    if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
1585      continue;
1586
1587    // If the value is unavailable in more than one predecessor, reject it.
1588    if (UnavailablePred && UnavailablePred != *PI)
1589      return false;
1590    UnavailablePred = *PI;
1591  }
1592
1593  assert(UnavailablePred != 0 &&
1594         "Fully available value should be eliminated above!");
1595
1596  // We don't currently handle critical edges :(
1597  if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
1598    DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
1599                 << UnavailablePred->getName() << "': " << *LI << '\n');
1600    return false;
1601  }
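  // (Here a critical edge is the edge from UnavailablePred to LoadBB when
  //  UnavailablePred has other successors: a load inserted at the end of
  //  UnavailablePred would then also execute on paths that never reach LI.
  //  Splitting the edge would fix this, but this path does not do that yet.)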
1602
1603  // Do PHI translation to get its value in the predecessor if necessary.  The
1604  // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
1605  //
1606  SmallVector<Instruction*, 8> NewInsts;
1607
1608  // If all preds have a single successor, then we know it is safe to insert the
1609  // load on the pred (?!?), so we can insert code to materialize the pointer if
1610  // it is not available.
1611  PHITransAddr Address(LI->getOperand(0), TD);
1612  Value *LoadPtr = 0;
1613  if (allSingleSucc) {
1614    LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
1615                                                *DT, NewInsts);
1616  } else {
1617    Address.PHITranslateValue(LoadBB, UnavailablePred);
1618    LoadPtr = Address.getAddr();
1619
1620    // Make sure the value is live in the predecessor.
1621    if (Instruction *Inst = dyn_cast_or_null<Instruction>(LoadPtr))
1622      if (!DT->dominates(Inst->getParent(), UnavailablePred))
1623        LoadPtr = 0;
1624  }
1625
1626  // If we couldn't find or insert a computation of this phi translated value,
1627  // we fail PRE.
1628  if (LoadPtr == 0) {
1629    assert(NewInsts.empty() && "Shouldn't insert insts on failure");
1630    DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1631                 << *LI->getOperand(0) << "\n");
1632    return false;
1633  }
1634
1635  // Assign value numbers to these new instructions.
1636  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
1637    // FIXME: We really _ought_ to insert these value numbers into their
1638    // parent's availability map.  However, in doing so, we risk getting into
1639    // ordering issues.  If a block hasn't been processed yet, we would be
1640    // marking a value as AVAIL-IN, which isn't what we intend.
1641    VN.lookup_or_add(NewInsts[i]);
1642  }
1643
1644  // Make sure it is valid to move this load here.  We have to watch out for:
1645  //  @1 = getelementptr (i8* p, ...
1646  //  test p and branch if == 0
1647  //  load @1
1648  // It is valid to have the getelementptr before the test, even if p can be 0,
1649  // as getelementptr only does address arithmetic.
1650  // If we are not pushing the value through any multiple-successor blocks
1651  // we do not have this case.  Otherwise, check that the load is safe to
1652  // put anywhere; this can be improved, but should be conservatively safe.
1653  if (!allSingleSucc &&
1654      // FIXME: REEVALUATE THIS.
1655      !isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator())) {
1656    assert(NewInsts.empty() && "Should not have inserted instructions");
1657    return false;
1658  }
1659
1660  // Okay, we can eliminate this load by inserting a reload in the predecessor
1661  // and using PHI construction to get the value in the other predecessors.
1662  // Do it now.
1663  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
1664  DEBUG(if (!NewInsts.empty())
1665          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
1666                 << *NewInsts.back() << '\n');
1667
1668  Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
1669                                LI->getAlignment(),
1670                                UnavailablePred->getTerminator());
1671
1672  // Add the newly created load.
1673  ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,NewLoad));
1674
1675  // Perform PHI construction.
1676  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
1677                                    VN.getAliasAnalysis());
1678  LI->replaceAllUsesWith(V);
1679  if (isa<PHINode>(V))
1680    V->takeName(LI);
1681  if (isa<PointerType>(V->getType()))
1682    MD->invalidateCachedPointerInfo(V);
1683  toErase.push_back(LI);
1684  NumPRELoad++;
1685  return true;
1686}
1687
1688/// processLoad - Attempt to eliminate a load, first by eliminating it
1689/// locally, and then attempting non-local elimination if that fails.
1690bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
1691  if (!MD)
1692    return false;
1693
1694  if (L->isVolatile())
1695    return false;
1696
1697  // Find the local memory dependency of this load (a prior store, load, etc.).
1698  MemDepResult Dep = MD->getDependency(L);
1699
1700  // If the dependency is a clobber, we may still be able to forward a value.
1701  if (Dep.isClobber()) {
1702    // Check to see if we have something like this:
1703    //   store i32 123, i32* %P
1704    //   %A = bitcast i32* %P to i8*
1705    //   %B = gep i8* %A, i32 1
1706    //   %C = load i8* %B
1707    //
1708    // We can handle this by recognizing that the clobbering store and the load
1709    // share a common base plus a constant offset, and that the previous store
1710    // (or memset) completely covers this load.  This sort of thing can happen
1711    // in bitfield access code.
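    // For the example above, the i8 load reads byte 1 of the 4 bytes written
    // by the store, so the analysis below would report Offset = 1 and
    // GetStoreValueForLoad could then extract the corresponding 8 bits from
    // the stored i32 (which bits depends on the target's endianness).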
1712    Value *AvailVal = 0;
1713    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
1714      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
1715        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
1716                                                    L->getPointerOperand(),
1717                                                    DepSI, *TD);
1718        if (Offset != -1)
1719          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
1720                                          L->getType(), L, *TD);
1721      }
1722
1723    // If the clobbering value is a memset/memcpy/memmove, see if we can forward
1724    // a value on from it.
1725    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
1726      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
1727        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
1728                                                      L->getPointerOperand(),
1729                                                      DepMI, *TD);
1730        if (Offset != -1)
1731          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,*TD);
1732      }
1733    }
1734
1735    if (AvailVal) {
1736      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
1737            << *AvailVal << '\n' << *L << "\n\n\n");
1738
1739      // Replace the load!
1740      L->replaceAllUsesWith(AvailVal);
1741      if (isa<PointerType>(AvailVal->getType()))
1742        MD->invalidateCachedPointerInfo(AvailVal);
1743      toErase.push_back(L);
1744      NumGVNLoad++;
1745      return true;
1746    }
1747
1748    DEBUG(
1749      // fast print dep, using operator<< on instruction would be too slow
1750      dbgs() << "GVN: load ";
1751      WriteAsOperand(dbgs(), L);
1752      Instruction *I = Dep.getInst();
1753      dbgs() << " is clobbered by " << *I << '\n';
1754    );
1755    return false;
1756  }
1757
1758  // If it is defined in another block, try harder.
1759  if (Dep.isNonLocal())
1760    return processNonLocalLoad(L, toErase);
1761
1762  Instruction *DepInst = Dep.getInst();
1763  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
1764    Value *StoredVal = DepSI->getOperand(0);
1765
1766    // The store and load are to a must-aliased pointer, but they may not
1767    // actually have the same type.  See if we know how to reuse the stored
1768    // value (depending on its type).
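    // For example (a hypothetical case): if an i32 was stored to %P and a
    // float is loaded from %P, the stored bits can be reused by emitting a
    // "bitcast i32 %val to float" rather than reloading from memory.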
1769    const TargetData *TD = 0;
1770    if (StoredVal->getType() != L->getType()) {
1771      if ((TD = getAnalysisIfAvailable<TargetData>())) {
1772        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
1773                                                   L, *TD);
1774        if (StoredVal == 0)
1775          return false;
1776
1777        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
1778                     << '\n' << *L << "\n\n\n");
1779      }
1780      else
1781        return false;
1782    }
1783
1784    // Remove it!
1785    L->replaceAllUsesWith(StoredVal);
1786    if (isa<PointerType>(StoredVal->getType()))
1787      MD->invalidateCachedPointerInfo(StoredVal);
1788    toErase.push_back(L);
1789    NumGVNLoad++;
1790    return true;
1791  }
1792
1793  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
1794    Value *AvailableVal = DepLI;
1795
1796    // The loads are of a must-aliased pointer, but they may not actually have
1797    // the same type.  See if we know how to reuse the previously loaded value
1798    // (depending on its type).
1799    const TargetData *TD = 0;
1800    if (DepLI->getType() != L->getType()) {
1801      if ((TD = getAnalysisIfAvailable<TargetData>())) {
1802        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), L,*TD);
1803        if (AvailableVal == 0)
1804          return false;
1805
1806        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
1807                     << "\n" << *L << "\n\n\n");
1808      }
1809      else
1810        return false;
1811    }
1812
1813    // Remove it!
1814    L->replaceAllUsesWith(AvailableVal);
1815    if (isa<PointerType>(DepLI->getType()))
1816      MD->invalidateCachedPointerInfo(DepLI);
1817    toErase.push_back(L);
1818    NumGVNLoad++;
1819    return true;
1820  }
1821
1822  // If this load really doesn't depend on anything, then we must be loading an
1823  // undef value.  This can happen when loading from a fresh allocation with no
1824  // intervening stores, for example.
1825  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
1826    L->replaceAllUsesWith(UndefValue::get(L->getType()));
1827    toErase.push_back(L);
1828    NumGVNLoad++;
1829    return true;
1830  }
1831
1832  // If this load occurs right after a lifetime.start intrinsic,
1833  // then the loaded value is undefined.
1834  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
1835    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
1836      L->replaceAllUsesWith(UndefValue::get(L->getType()));
1837      toErase.push_back(L);
1838      NumGVNLoad++;
1839      return true;
1840    }
1841  }
1842
1843  return false;
1844}
1845
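/// lookupNumber - Walk the chain of ValueNumberScopes for BB (the scopes of
/// its dominators) and return the value recorded for value number 'num', or
/// null if no dominating scope has one.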
1846Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
1847  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
1848  if (I == localAvail.end())
1849    return 0;
1850
1851  ValueNumberScope *Locals = I->second;
1852  while (Locals) {
1853    DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
1854    if (I != Locals->table.end())
1855      return I->second;
1856    Locals = Locals->parent;
1857  }
1858
1859  return 0;
1860}
1861
1862
1863/// processInstruction - When calculating availability, handle an instruction
1864/// by inserting it into the appropriate sets.
1865bool GVN::processInstruction(Instruction *I,
1866                             SmallVectorImpl<Instruction*> &toErase) {
1867  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1868    bool Changed = processLoad(LI, toErase);
1869
1870    if (!Changed) {
1871      unsigned Num = VN.lookup_or_add(LI);
1872      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
1873    }
1874
1875    return Changed;
1876  }
1877
1878  uint32_t NextNum = VN.getNextUnusedValueNumber();
1879  unsigned Num = VN.lookup_or_add(I);
1880
1881  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
1882    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1883
1884    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
1885      return false;
1886
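    // Along the true edge the branch condition is known to be true, and along
    // the false edge it is known to be false.  Record that fact in each
    // successor whose only predecessor is this block, so later lookups of the
    // condition's value number there fold to a constant.  For example
    // (hypothetical IR):
    //   %cmp = icmp eq i32 %x, 0
    //   br i1 %cmp, label %then, label %else
    // Uses of %cmp's value number in %then see 'true' and in %else see 'false'.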
1887    Value *BranchCond = BI->getCondition();
1888    uint32_t CondVN = VN.lookup_or_add(BranchCond);
1889
1890    BasicBlock *TrueSucc = BI->getSuccessor(0);
1891    BasicBlock *FalseSucc = BI->getSuccessor(1);
1892
1893    if (TrueSucc->getSinglePredecessor())
1894      localAvail[TrueSucc]->table[CondVN] =
1895        ConstantInt::getTrue(TrueSucc->getContext());
1896    if (FalseSucc->getSinglePredecessor())
1897      localAvail[FalseSucc]->table[CondVN] =
1898        ConstantInt::getFalse(FalseSucc->getContext());
1899
1900    return false;
1901
1902  // Allocations and terminators are always uniquely numbered, so we can save
1903  // time and memory by bailing out on them early.
1904  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
1905    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1906    return false;
1907  }
1908
1909  // Collapse PHI nodes
1910  if (PHINode* p = dyn_cast<PHINode>(I)) {
1911    Value *constVal = CollapsePhi(p);
1912
1913    if (constVal) {
1914      p->replaceAllUsesWith(constVal);
1915      if (MD && isa<PointerType>(constVal->getType()))
1916        MD->invalidateCachedPointerInfo(constVal);
1917      VN.erase(p);
1918
1919      toErase.push_back(p);
1920    } else {
1921      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1922    }
1923
1924  // If the number we were assigned was a brand new VN, then we don't
1925  // need to do a lookup to see if the number already exists
1926  // somewhere in the domtree: it can't!
1927  } else if (Num == NextNum) {
1928    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1929
1930  // Perform fast-path value-number based elimination of values inherited from
1931  // dominators.
1932  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
1933    // Remove it!
1934    VN.erase(I);
1935    I->replaceAllUsesWith(repl);
1936    if (MD && isa<PointerType>(repl->getType()))
1937      MD->invalidateCachedPointerInfo(repl);
1938    toErase.push_back(I);
1939    return true;
1940
1941  } else {
1942    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1943  }
1944
1945  return false;
1946}
1947
1948/// runOnFunction - This is the main transformation entry point for a function.
1949bool GVN::runOnFunction(Function& F) {
1950  if (!NoLoads)
1951    MD = &getAnalysis<MemoryDependenceAnalysis>();
1952  DT = &getAnalysis<DominatorTree>();
1953  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
1954  VN.setMemDep(MD);
1955  VN.setDomTree(DT);
1956
1957  bool Changed = false;
1958  bool ShouldContinue = true;
1959
1960  // Merge unconditional branches, allowing PRE to catch more
1961  // optimization opportunities.
1962  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
1963    BasicBlock *BB = FI;
1964    ++FI;
1965    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
1966    if (removedBlock) NumGVNBlocks++;
1967
1968    Changed |= removedBlock;
1969  }
1970
1971  unsigned Iteration = 0;
1972
1973  while (ShouldContinue) {
1974    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
1975    ShouldContinue = iterateOnFunction(F);
1976    Changed |= ShouldContinue;
1977    ++Iteration;
1978  }
1979
1980  if (EnablePRE) {
1981    bool PREChanged = true;
1982    while (PREChanged) {
1983      PREChanged = performPRE(F);
1984      Changed |= PREChanged;
1985    }
1986  }
1987  // FIXME: Should perform GVN again after PRE does something.  PRE can move
1988  // computations into blocks where they become fully redundant.  Note that
1989  // we can't do this until PRE's critical edge splitting updates memdep.
1990  // Actually, when this happens, we should just fully integrate PRE into GVN.
1991
1992  cleanupGlobalSets();
1993
1994  return Changed;
1995}
1996
1997
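/// processBlock - Run processInstruction over every instruction in BB,
/// eagerly erasing any instructions it queues for deletion while keeping the
/// block iterator valid.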
1998bool GVN::processBlock(BasicBlock *BB) {
1999  // FIXME: Kill off toErase by erasing eagerly in a helper function (and
2000  // incrementing BI before processing an instruction).
2001  SmallVector<Instruction*, 8> toErase;
2002  bool ChangedFunction = false;
2003
2004  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2005       BI != BE;) {
2006    ChangedFunction |= processInstruction(BI, toErase);
2007    if (toErase.empty()) {
2008      ++BI;
2009      continue;
2010    }
2011
2012    // If we need some instructions deleted, do it now.
2013    NumGVNInstr += toErase.size();
2014
2015    // Avoid iterator invalidation.
2016    bool AtStart = BI == BB->begin();
2017    if (!AtStart)
2018      --BI;
2019
2020    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
2021         E = toErase.end(); I != E; ++I) {
2022      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
2023      if (MD) MD->removeInstruction(*I);
2024      (*I)->eraseFromParent();
2025      DEBUG(verifyRemoved(*I));
2026    }
2027    toErase.clear();
2028
2029    if (AtStart)
2030      BI = BB->begin();
2031    else
2032      ++BI;
2033  }
2034
2035  return ChangedFunction;
2036}
2037
2038/// performPRE - Perform a purely local form of PRE that looks for diamond
2039/// control flow patterns and attempts to perform simple PRE at the join point.
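///
/// For illustration, in a hypothetical diamond:
///
///            entry
///            /   \
///      bb.then   bb.else      ; %a + %b is computed in bb.then only
///            \   /
///           bb.join           ; ...and computed again here
///
/// PRE inserts "%t = add i32 %a, %b" at the end of bb.else and replaces the
/// recomputation in bb.join with a PHI of the two available copies.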
2040bool GVN::performPRE(Function &F) {
2041  bool Changed = false;
2042  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
2043  DenseMap<BasicBlock*, Value*> predMap;
2044  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
2045       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
2046    BasicBlock *CurrentBlock = *DI;
2047
2048    // Nothing to PRE in the entry block.
2049    if (CurrentBlock == &F.getEntryBlock()) continue;
2050
2051    for (BasicBlock::iterator BI = CurrentBlock->begin(),
2052         BE = CurrentBlock->end(); BI != BE; ) {
2053      Instruction *CurInst = BI++;
2054
2055      if (isa<AllocaInst>(CurInst) ||
2056          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
2057          CurInst->getType()->isVoidTy() ||
2058          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2059          isa<DbgInfoIntrinsic>(CurInst))
2060        continue;
2061
2062      uint32_t ValNo = VN.lookup(CurInst);
2063
2064      // Look for the predecessors for PRE opportunities.  We're
2065      // only trying to solve the basic diamond case, where
2066      // a value is computed in the successor and one predecessor,
2067      // but not the other.  We also explicitly disallow cases
2068      // where the successor is its own predecessor, because they're
2069      // more complicated to get right.
2070      unsigned NumWith = 0;
2071      unsigned NumWithout = 0;
2072      BasicBlock *PREPred = 0;
2073      predMap.clear();
2074
2075      for (pred_iterator PI = pred_begin(CurrentBlock),
2076           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
2077        // We're not interested in PRE where the block is its
2078        // own predecessor, or in blocks with predecessors
2079        // that are not reachable.
2080        if (*PI == CurrentBlock) {
2081          NumWithout = 2;
2082          break;
2083        } else if (!localAvail.count(*PI))  {
2084          NumWithout = 2;
2085          break;
2086        }
2087
2088        DenseMap<uint32_t, Value*>::iterator predV =
2089                                            localAvail[*PI]->table.find(ValNo);
2090        if (predV == localAvail[*PI]->table.end()) {
2091          PREPred = *PI;
2092          NumWithout++;
2093        } else if (predV->second == CurInst) {
2094          NumWithout = 2;
2095        } else {
2096          predMap[*PI] = predV->second;
2097          NumWith++;
2098        }
2099      }
2100
2101      // Don't do PRE when it might increase code size, i.e. when
2102      // we would need to insert instructions in more than one pred.
2103      if (NumWithout != 1 || NumWith == 0)
2104        continue;
2105
2106      // Don't do PRE across an indirect branch.
2107      if (isa<IndirectBrInst>(PREPred->getTerminator()))
2108        continue;
2109
2110      // We can't do PRE safely on a critical edge, so instead we schedule
2111      // the edge to be split and perform the PRE the next time we iterate
2112      // on the function.
2113      unsigned SuccNum = 0;
2114      for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
2115           i != e; ++i)
2116        if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
2117          SuccNum = i;
2118          break;
2119        }
2120
2121      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
2122        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
2123        continue;
2124      }
2125
2126      // Instantiate the expression in the predecessor that lacked it.
2127      // Because we are going top-down through the block, all value numbers
2128      // will be available in the predecessor by the time we need them.  Any
2129      // that weren't originally present will have been instantiated earlier
2130      // in this loop.
2131      Instruction *PREInstr = CurInst->clone();
2132      bool success = true;
2133      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
2134        Value *Op = PREInstr->getOperand(i);
2135        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2136          continue;
2137
2138        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
2139          PREInstr->setOperand(i, V);
2140        } else {
2141          success = false;
2142          break;
2143        }
2144      }
2145
2146      // Fail out if we encounter an operand that is not available in
2147      // the PRE predecessor.  This is typically because of loads which
2148      // are not value numbered precisely.
2149      if (!success) {
2150        delete PREInstr;
2151        DEBUG(verifyRemoved(PREInstr));
2152        continue;
2153      }
2154
2155      PREInstr->insertBefore(PREPred->getTerminator());
2156      PREInstr->setName(CurInst->getName() + ".pre");
2157      predMap[PREPred] = PREInstr;
2158      VN.add(PREInstr, ValNo);
2159      NumGVNPRE++;
2160
2161      // Update the availability map to include the new instruction.
2162      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));
2163
2164      // Create a PHI to make the value available in this block.
2165      PHINode* Phi = PHINode::Create(CurInst->getType(),
2166                                     CurInst->getName() + ".pre-phi",
2167                                     CurrentBlock->begin());
2168      for (pred_iterator PI = pred_begin(CurrentBlock),
2169           PE = pred_end(CurrentBlock); PI != PE; ++PI)
2170        Phi->addIncoming(predMap[*PI], *PI);
2171
2172      VN.add(Phi, ValNo);
2173      localAvail[CurrentBlock]->table[ValNo] = Phi;
2174
2175      CurInst->replaceAllUsesWith(Phi);
2176      if (MD && isa<PointerType>(Phi->getType()))
2177        MD->invalidateCachedPointerInfo(Phi);
2178      VN.erase(CurInst);
2179
2180      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
2181      if (MD) MD->removeInstruction(CurInst);
2182      CurInst->eraseFromParent();
2183      DEBUG(verifyRemoved(CurInst));
2184      Changed = true;
2185    }
2186  }
2187
2188  for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
2189       I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
2190    SplitCriticalEdge(I->first, I->second, this);
2191
2192  return Changed || !toSplit.empty();
2193}
2194
2195/// iterateOnFunction - Executes one iteration of GVN
2196bool GVN::iterateOnFunction(Function &F) {
2197  cleanupGlobalSets();
2198
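  // Rebuild the per-block value-number scopes: each block's scope is chained
  // to the scope of its immediate dominator, so lookupNumber() can fall back
  // to values made available in dominating blocks.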
2199  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
2200       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
2201    if (DI->getIDom())
2202      localAvail[DI->getBlock()] =
2203                   new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
2204    else
2205      localAvail[DI->getBlock()] = new ValueNumberScope(0);
2206  }
2207
2208  // Top-down walk of the dominator tree
2209  bool Changed = false;
2210#if 0
2211  // Needed for value numbering with phi construction to work.
2212  ReversePostOrderTraversal<Function*> RPOT(&F);
2213  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
2214       RE = RPOT.end(); RI != RE; ++RI)
2215    Changed |= processBlock(*RI);
2216#else
2217  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
2218       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
2219    Changed |= processBlock(DI->getBlock());
2220#endif
2221
2222  return Changed;
2223}
2224
2225void GVN::cleanupGlobalSets() {
2226  VN.clear();
2227
2228  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
2229       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
2230    delete I->second;
2231  localAvail.clear();
2232}
2233
2234/// verifyRemoved - Verify that the specified instruction does not occur in our
2235/// internal data structures.
2236void GVN::verifyRemoved(const Instruction *Inst) const {
2237  VN.verifyRemoved(Inst);
2238
2239  // Walk through the value number scope to make sure the instruction isn't
2240  // ferreted away in it.
2241  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
2242         I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
2243    const ValueNumberScope *VNS = I->second;
2244
2245    while (VNS) {
2246      for (DenseMap<uint32_t, Value*>::const_iterator
2247             II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
2248        assert(II->second != Inst && "Inst still in value numbering scope!");
2249      }
2250
2251      VNS = VNS->parent;
2252    }
2253  }
2254}
2255