GVN.cpp revision 1df9859c40492511b8aa4321eb76496005d3b75b
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> EnableFullLoadPRE("enable-full-load-pre", cl::init(false));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
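///
/// As a rough illustration: given
///   %x = add i32 %a, %b
///   %y = add i32 %a, %b
/// both instructions produce the same Expression, so %x and %y receive the
/// same value number and GVN can treat the second one as redundant.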
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(Constant* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      unsigned size();
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

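  // Note: the shift-and-xor below is a cheap pointer hash; the low bits of
  // the Type* and Value* addresses are discarded because allocation
  // alignment makes them nearly constant.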
  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9)) +
           hash * 37;

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

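/// lookup_or_add_call - Value-number a call instruction.  Three cases are
/// distinguished below: calls that don't access memory are numbered like any
/// other expression; calls that only read memory may share a number with an
/// identical dominating call, but only if memdep shows that no intervening
/// write clobbers the memory they read; all other calls always receive a
/// fresh value number.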
uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumOperands() != C->getNumOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 1; i < C->getNumOperands(); ++i) {
        uint32_t c_vn = lookup_or_add(C->getOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with the local case above.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumOperands() != C->getNumOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 1; i < C->getNumOperands(); ++i) {
      uint32_t c_vn = lookup_or_add(C->getOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool nopre = false, bool noloads = false)
      : FunctionPass(&ID), NoPRE(nopre), NoLoads(noloads), MD(0) { }

  private:
    bool NoPRE;
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoPRE, bool NoLoads) {
  return new GVN(NoPRE, NoLoads);
}

static RegisterPass<GVN> X("gvn",
                           "Global Value Numbering");

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}

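/// isSafeReplacement - Return false when replacing the PHI 'p' with 'inst'
/// (itself a PHI) would be unsafe because some user of 'p' is a PHI node in
/// inst's own block, where PHI nodes have parallel-copy semantics.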
static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

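/// CollapsePhi - If PN always computes a single value, return that value if
/// it is safe to substitute it for PN; otherwise return null.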
Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully available in FullyAvailableBlocks.
/// This map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
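///
/// For example (a simplified sketch): if the value is known available in both
/// predecessors of a diamond's join block, the recursion marks the join block
/// fully available too.  If a predecessor later turns out to be unavailable
/// after we optimistically assumed otherwise, SpeculationFailure walks the
/// block's transitive successors to undo the optimistic marks.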
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them
  // as 0 if set to 1.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform it.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}


/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
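///
/// For example, a rough sketch of the same-size case on a 64-bit target: an
/// i8* value feeding an i64 load is rewritten with a single ptrtoint, and an
/// i64 value feeding an i8* load with a single inttoptr.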
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
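///
/// For example, given the (illustrative) instruction
///   %p = getelementptr { i32, i32 }* %base, i32 0, i32 1
/// this returns %base and adds the layout offset of field 1 (4 bytes with
/// the usual layout) to Offset.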
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}


/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
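///
/// For instance, if a 4-byte store covers bytes 0-3 of some object and the
/// query is a 1-byte load of byte 2, the result is 2.  If the load is not
/// completely contained within the stored bytes, the result is -1.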
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform it.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.
  if (LoadOffset == StoreOffset) {
#if 0
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
    << "Base       = " << *StoreBase << "\n"
    << "Store Ptr  = " << *WritePtr << "\n"
    << "Store Offs = " << StoreOffset << "\n"
    << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all; AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;

  bool isAAFailure = false;
  if (StoreOffset < LoadOffset) {
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  } else {
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
  }
  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
    << "Base       = " << *StoreBase << "\n"
    << "Store Ptr  = " << *WritePtr << "\n"
    << "Store Offs = " << StoreOffset << "\n"
    << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load and then merge the bits in) but this seems unlikely
  // to be valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getOperand(0)->getType()->isStructTy() ||
      DepSI->getOperand(0)->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

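/// AnalyzeLoadFromClobberingMemInst - This function is called when we have a
/// memdep query of a load that ends up being clobbered by a memset or a
/// memcpy/memmove.  It returns the byte offset of the load within the write,
/// or -1 if no part of the load can be forwarded from the intrinsic.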
static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}


/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
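///
/// Roughly: the stored value is converted to an integer, shifted so that the
/// bytes the load wants land in the low bits (the shift amount depends on
/// endianness), truncated to the load's size, and then coerced back to the
/// load's type.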
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8;
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
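///
/// For a memset, the value byte is splatted to the width of the load: e.g.
/// an i32 load from memset(P, %x, n) materializes roughly
///   zext %x to i32, then or together copies shifted by 0, 8, 16 and 24 bits.
/// For a memcpy/memmove from a constant global, the result is constant folded
/// from the initializer at the given offset.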
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // See if we can constant fold a load from the constant with the offset
  // applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}


struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
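///
/// For example (names illustrative): if the loaded value is available as %v1
/// in one predecessor and %v2 in the other, this builds
///   %merge = phi i32 [ %v1, %pred1 ], [ %v2, %pred2 ]
/// after coercing each available value to LI's type if necessary.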
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI);

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}

static bool isLifetimeStart(Instruction *Inst) {
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
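///
/// If the load is available along every incoming path it is fully redundant
/// and is replaced outright via ConstructSSAForLoadSet.  If it is available
/// along only some paths, the load PRE logic below may insert a single new
/// load in the missing predecessor to make it redundant.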
1341bool GVN::processNonLocalLoad(LoadInst *LI,
1342                              SmallVectorImpl<Instruction*> &toErase) {
1343  // Find the non-local dependencies of the load.
1344  SmallVector<NonLocalDepResult, 64> Deps;
1345  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
1346                                   Deps);
1347  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
1348  //             << Deps.size() << *LI << '\n');
1349
1350  // If we had to process more than one hundred blocks to find the
1351  // dependencies, this load isn't worth worrying about.  Optimizing
1352  // it will be too expensive.
1353  if (Deps.size() > 100)
1354    return false;
1355
1356  // If we had a phi translation failure, we'll have a single entry which is a
1357  // clobber in the current block.  Reject this early.
1358  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
1359    DEBUG(
1360      dbgs() << "GVN: non-local load ";
1361      WriteAsOperand(dbgs(), LI);
1362      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
1363    );
1364    return false;
1365  }
1366
1367  // Filter out useless results (non-locals, etc).  Keep track of the blocks
1368  // where we have a value available in repl, also keep track of whether we see
1369  // dependencies that produce an unknown value for the load (such as a call
1370  // that could potentially clobber the load).
1371  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
1372  SmallVector<BasicBlock*, 16> UnavailableBlocks;
1373
1374  const TargetData *TD = 0;
1375
1376  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
1377    BasicBlock *DepBB = Deps[i].getBB();
1378    MemDepResult DepInfo = Deps[i].getResult();
1379
1380    if (DepInfo.isClobber()) {
1381      // The address being loaded in this non-local block may not be the same as
1382      // the pointer operand of the load if PHI translation occurs.  Make sure
1383      // to consider the right address.
1384      Value *Address = Deps[i].getAddress();
1385
1386      // If the dependence is to a store that writes to a superset of the bits
1387      // read by the load, we can extract the bits we need for the load from the
1388      // stored value.
1389      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
1390        if (TD == 0)
1391          TD = getAnalysisIfAvailable<TargetData>();
1392        if (TD && Address) {
1393          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
1394                                                      DepSI, *TD);
1395          if (Offset != -1) {
1396            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1397                                                           DepSI->getOperand(0),
1398                                                                Offset));
1399            continue;
1400          }
1401        }
1402      }
1403
1404      // If the clobbering value is a memset/memcpy/memmove, see if we can
1405      // forward a value on from it.
1406      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
1407        if (TD == 0)
1408          TD = getAnalysisIfAvailable<TargetData>();
1409        if (TD && Address) {
1410          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
1411                                                        DepMI, *TD);
1412          if (Offset != -1) {
1413            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
1414                                                                  Offset));
1415            continue;
1416          }
1417        }
1418      }
1419
1420      UnavailableBlocks.push_back(DepBB);
1421      continue;
1422    }
1423
1424    Instruction *DepInst = DepInfo.getInst();
1425
1426    // Loading the allocation -> undef.
1427    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
1428        // Loading immediately after lifetime begin -> undef.
1429        isLifetimeStart(DepInst)) {
1430      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1431                                             UndefValue::get(LI->getType())));
1432      continue;
1433    }
1434
1435    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
1436      // Reject loads and stores that are to the same address but are of
1437      // different types if we have to.
1438      if (S->getOperand(0)->getType() != LI->getType()) {
1439        if (TD == 0)
1440          TD = getAnalysisIfAvailable<TargetData>();
1441
1442        // If the stored value is larger or equal to the loaded value, we can
1443        // reuse it.
1444        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
1445                                                        LI->getType(), *TD)) {
1446          UnavailableBlocks.push_back(DepBB);
1447          continue;
1448        }
1449      }
1450
1451      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1452                                                          S->getOperand(0)));
1453      continue;
1454    }
1455
1456    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
1457      // If the types mismatch and we can't handle it, reject reuse of the load.
1458      if (LD->getType() != LI->getType()) {
1459        if (TD == 0)
1460          TD = getAnalysisIfAvailable<TargetData>();
1461
1462        // If the stored value is larger or equal to the loaded value, we can
1463        // reuse it.
1464        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
1465          UnavailableBlocks.push_back(DepBB);
1466          continue;
1467        }
1468      }
1469      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
1470      continue;
1471    }
1472
1473    UnavailableBlocks.push_back(DepBB);
1474    continue;
1475  }
1476
  // If none of the dependencies produced a known value for this load, give up
  // early.
1479  if (ValuesPerBlock.empty()) return false;
1480
1481  // If all of the instructions we depend on produce a known value for this
1482  // load, then it is fully redundant and we can use PHI insertion to compute
1483  // its value.  Insert PHIs and remove the fully redundant value now.
1484  if (UnavailableBlocks.empty()) {
1485    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');
1486
1487    // Perform PHI construction.
1488    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
1489                                      VN.getAliasAnalysis());
1490    LI->replaceAllUsesWith(V);
1491
1492    if (isa<PHINode>(V))
1493      V->takeName(LI);
1494    if (V->getType()->isPointerTy())
1495      MD->invalidateCachedPointerInfo(V);
1496    toErase.push_back(LI);
1497    NumGVNLoad++;
1498    return true;
1499  }
1500
1501  if (!EnablePRE || !EnableLoadPRE)
1502    return false;
1503
1504  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
1506  // doing PRE of this load.  This will involve inserting a new load into the
1507  // predecessor when it's not available.  We could do this in general, but
1508  // prefer to not increase code size.  As such, we only do this when we know
1509  // that we only have to insert *one* load (which means we're basically moving
1510  // the load, not inserting a new one).
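  // For example (an illustrative CFG; the block and value names are made up):
  //   BB1: store i32 %x, i32* %P    ; value of %P available here
  //   BB2: ...                      ; %P unavailable -> insert the reload here
  //   BB3: %v = load i32* %P        ; becomes a phi of %x and the reload
  // where BB1 and BB2 are the two predecessors of BB3.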
1511
1512  SmallPtrSet<BasicBlock *, 4> Blockers;
1513  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
1514    Blockers.insert(UnavailableBlocks[i]);
1515
  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
1518  BasicBlock *LoadBB = LI->getParent();
1519  BasicBlock *TmpBB = LoadBB;
1520
1521  bool isSinglePred = false;
1522  bool allSingleSucc = true;
1523  while (TmpBB->getSinglePredecessor()) {
1524    isSinglePred = true;
1525    TmpBB = TmpBB->getSinglePredecessor();
1526    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
1527      return false;
1528    if (Blockers.count(TmpBB))
1529      return false;
1530    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
1531      allSingleSucc = false;
1532  }
1533
1534  assert(TmpBB);
1535  LoadBB = TmpBB;
1536
  // If the set of available values contains LI itself, we have a loop where
1538  // at least one of the values is LI.  Since this means that we won't be able
1539  // to eliminate LI even if we insert uses in the other predecessors, we will
1540  // end up increasing code size.  Reject this by scanning for LI.
1541  if (!EnableFullLoadPRE) {
1542    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
1543      if (ValuesPerBlock[i].isSimpleValue() &&
1544          ValuesPerBlock[i].getSimpleValue() == LI)
1545        return false;
1546  }
1547
1548  // FIXME: It is extremely unclear what this loop is doing, other than
1549  // artificially restricting loadpre.
1550  if (isSinglePred) {
1551    bool isHot = false;
1552    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
1553      const AvailableValueInBlock &AV = ValuesPerBlock[i];
1554      if (AV.isSimpleValue())
        // LI is "hot": it lies in a loop, because it dominates one of the
        // instructions it depends on (only possible around a back edge).
1557        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
1558          if (DT->dominates(LI, I)) {
1559            isHot = true;
1560            break;
1561          }
1562    }
1563
    // We are interested only in "hot" loads; doing PRE elsewhere risks
    // pessimizing the code.
1566    if (!isHot)
1567      return false;
1568  }
1569
1570  // Check to see how many predecessors have the loaded value fully
1571  // available.
1572  DenseMap<BasicBlock*, Value*> PredLoads;
1573  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
1574  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
1575    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
1576  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
1577    FullyAvailableBlocks[UnavailableBlocks[i]] = false;
1578
1579  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
1580       PI != E; ++PI) {
1581    BasicBlock *Pred = *PI;
1582    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
1583      continue;
1584    }
1585    PredLoads[Pred] = 0;
1586    // We don't currently handle critical edges :(
1587    if (Pred->getTerminator()->getNumSuccessors() != 1) {
1588      DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
1589            << Pred->getName() << "': " << *LI << '\n');
1590      return false;
1591    }
1592  }
1593
1594  // Decide whether PRE is profitable for this load.
1595  unsigned NumUnavailablePreds = PredLoads.size();
1596  assert(NumUnavailablePreds != 0 &&
1597         "Fully available value should be eliminated above!");
1598  if (!EnableFullLoadPRE) {
1599    // If this load is unavailable in multiple predecessors, reject it.
1600    // FIXME: If we could restructure the CFG, we could make a common pred with
1601    // all the preds that don't have an available LI and insert a new load into
1602    // that one block.
1603    if (NumUnavailablePreds != 1)
1604      return false;
1605  }
1606
1607  // Check if the load can safely be moved to all the unavailable predecessors.
1608  bool CanDoPRE = true;
1609  SmallVector<Instruction*, 8> NewInsts;
1610  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
1611         E = PredLoads.end(); I != E; ++I) {
1612    BasicBlock *UnavailablePred = I->first;
1613
1614    // Do PHI translation to get its value in the predecessor if necessary.  The
1615    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
1616
1617    // If all preds have a single successor, then we know it is safe to insert
1618    // the load on the pred (?!?), so we can insert code to materialize the
1619    // pointer if it is not available.
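    // For example (illustrative IR), if LoadBB begins with
    //   %P = phi i32* [ %P1, %Pred1 ], [ %P2, %Pred2 ]
    // then the load's address %P phi-translates to %P1 in %Pred1.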
1620    PHITransAddr Address(LI->getOperand(0), TD);
1621    Value *LoadPtr = 0;
1622    if (allSingleSucc) {
1623      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
1624                                                  *DT, NewInsts);
1625    } else {
1626      Address.PHITranslateValue(LoadBB, UnavailablePred);
1627      LoadPtr = Address.getAddr();
1628
1629      // Make sure the value is live in the predecessor.
1630      if (Instruction *Inst = dyn_cast_or_null<Instruction>(LoadPtr))
1631        if (!DT->dominates(Inst->getParent(), UnavailablePred))
1632          LoadPtr = 0;
1633    }
1634
1635    // If we couldn't find or insert a computation of this phi translated value,
1636    // we fail PRE.
1637    if (LoadPtr == 0) {
1638      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1639            << *LI->getOperand(0) << "\n");
1640      CanDoPRE = false;
1641      break;
1642    }
1643
1644    // Make sure it is valid to move this load here.  We have to watch out for:
1645    //  @1 = getelementptr (i8* p, ...
1646    //  test p and branch if == 0
1647    //  load @1
1648    // It is valid to have the getelementptr before the test, even if p can be 0,
1649    // as getelementptr only does address arithmetic.
1650    // If we are not pushing the value through any multiple-successor blocks
1651    // we do not have this case.  Otherwise, check that the load is safe to
1652    // put anywhere; this can be improved, but should be conservatively safe.
1653    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
1655        !isSafeToLoadUnconditionally(LoadPtr,
1656                                     UnavailablePred->getTerminator(),
1657                                     LI->getAlignment(), TD)) {
1658      CanDoPRE = false;
1659      break;
1660    }
1661
1662    I->second = LoadPtr;
1663  }
1664
1665  if (!CanDoPRE) {
1666    while (!NewInsts.empty())
1667      NewInsts.pop_back_val()->eraseFromParent();
1668    return false;
1669  }
1670
1671  // Okay, we can eliminate this load by inserting a reload in the predecessor
1672  // and using PHI construction to get the value in the other predecessors, do
1673  // it.
1674  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
1675  DEBUG(if (!NewInsts.empty())
1676          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
1677                 << *NewInsts.back() << '\n');
1678
1679  // Assign value numbers to the new instructions.
1680  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
1681    // FIXME: We really _ought_ to insert these value numbers into their
1682    // parent's availability map.  However, in doing so, we risk getting into
1683    // ordering issues.  If a block hasn't been processed yet, we would be
1684    // marking a value as AVAIL-IN, which isn't what we intend.
1685    VN.lookup_or_add(NewInsts[i]);
1686  }
1687
1688  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
1689         E = PredLoads.end(); I != E; ++I) {
1690    BasicBlock *UnavailablePred = I->first;
1691    Value *LoadPtr = I->second;
1692
1693    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
1694                                  LI->getAlignment(),
1695                                  UnavailablePred->getTerminator());
1696
1697    // Add the newly created load.
1698    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
1699                                                        NewLoad));
1700  }
1701
1702  // Perform PHI construction.
1703  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
1704                                    VN.getAliasAnalysis());
1705  LI->replaceAllUsesWith(V);
1706  if (isa<PHINode>(V))
1707    V->takeName(LI);
1708  if (V->getType()->isPointerTy())
1709    MD->invalidateCachedPointerInfo(V);
1710  toErase.push_back(LI);
1711  NumPRELoad++;
1712  return true;
1713}
1714
1715/// processLoad - Attempt to eliminate a load, first by eliminating it
1716/// locally, and then attempting non-local elimination if that fails.
1717bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
1718  if (!MD)
1719    return false;
1720
1721  if (L->isVolatile())
1722    return false;
1723
  // Find this load's local dependency, e.g. a prior store to or load of the
  // same pointer.
1725  MemDepResult Dep = MD->getDependency(L);
1726
1727  // If the value isn't available, don't do anything!
1728  if (Dep.isClobber()) {
1729    // Check to see if we have something like this:
1730    //   store i32 123, i32* %P
1731    //   %A = bitcast i32* %P to i8*
1732    //   %B = gep i8* %A, i32 1
1733    //   %C = load i8* %B
1734    //
    // We could handle this by recognizing that the clobbering instruction and
    // the load refer to an obvious common base + constant offset, and that the
    // previous store (or memset) completely covers this load.  This sort of
    // thing can happen in bitfield access code.
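    // In the example above, %C could then be rewritten as the byte of the
    // stored i32 123 selected by the offset (which byte depends on the
    // target's endianness).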
1739    Value *AvailVal = 0;
1740    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
1741      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
1742        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
1743                                                    L->getPointerOperand(),
1744                                                    DepSI, *TD);
1745        if (Offset != -1)
1746          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
1747                                          L->getType(), L, *TD);
1748      }
1749
1750    // If the clobbering value is a memset/memcpy/memmove, see if we can forward
1751    // a value on from it.
1752    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
1753      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
1754        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
1755                                                      L->getPointerOperand(),
1756                                                      DepMI, *TD);
1757        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(),
                                            L, *TD);
1759      }
1760    }
1761
1762    if (AvailVal) {
1763      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
1764            << *AvailVal << '\n' << *L << "\n\n\n");
1765
1766      // Replace the load!
1767      L->replaceAllUsesWith(AvailVal);
1768      if (AvailVal->getType()->isPointerTy())
1769        MD->invalidateCachedPointerInfo(AvailVal);
1770      toErase.push_back(L);
1771      NumGVNLoad++;
1772      return true;
1773    }
1774
1775    DEBUG(
1776      // fast print dep, using operator<< on instruction would be too slow
1777      dbgs() << "GVN: load ";
1778      WriteAsOperand(dbgs(), L);
1779      Instruction *I = Dep.getInst();
1780      dbgs() << " is clobbered by " << *I << '\n';
1781    );
1782    return false;
1783  }
1784
1785  // If it is defined in another block, try harder.
1786  if (Dep.isNonLocal())
1787    return processNonLocalLoad(L, toErase);
1788
1789  Instruction *DepInst = Dep.getInst();
1790  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
1791    Value *StoredVal = DepSI->getOperand(0);
1792
1793    // The store and load are to a must-aliased pointer, but they may not
1794    // actually have the same type.  See if we know how to reuse the stored
1795    // value (depending on its type).
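    // For example (illustrative IR):
    //   store i32 %x, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %v = load i8* %A          ; reusable as a coercion (e.g. trunc) of %x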
1796    const TargetData *TD = 0;
1797    if (StoredVal->getType() != L->getType()) {
1798      if ((TD = getAnalysisIfAvailable<TargetData>())) {
1799        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
1800                                                   L, *TD);
1801        if (StoredVal == 0)
1802          return false;
1803
1804        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
1805                     << '\n' << *L << "\n\n\n");
      } else
1808        return false;
1809    }
1810
1811    // Remove it!
1812    L->replaceAllUsesWith(StoredVal);
1813    if (StoredVal->getType()->isPointerTy())
1814      MD->invalidateCachedPointerInfo(StoredVal);
1815    toErase.push_back(L);
1816    NumGVNLoad++;
1817    return true;
1818  }
1819
1820  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
1821    Value *AvailableVal = DepLI;
1822
1823    // The loads are of a must-aliased pointer, but they may not actually have
1824    // the same type.  See if we know how to reuse the previously loaded value
1825    // (depending on its type).
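    // For example (illustrative), an i8 load that must-aliases an earlier i32
    // load can reuse a coercion (e.g. a truncation) of the wider value.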
1826    const TargetData *TD = 0;
1827    if (DepLI->getType() != L->getType()) {
1828      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
1830        if (AvailableVal == 0)
1831          return false;
1832
1833        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
1834                     << "\n" << *L << "\n\n\n");
      } else
1837        return false;
1838    }
1839
1840    // Remove it!
1841    L->replaceAllUsesWith(AvailableVal);
1842    if (DepLI->getType()->isPointerTy())
1843      MD->invalidateCachedPointerInfo(DepLI);
1844    toErase.push_back(L);
1845    NumGVNLoad++;
1846    return true;
1847  }
1848
1849  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading from a fresh allocation with no
1851  // intervening stores, for example.
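  // For example (illustrative IR):
  //   %P = alloca i32
  //   %v = load i32* %P          ; %v is undef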
1852  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
1853    L->replaceAllUsesWith(UndefValue::get(L->getType()));
1854    toErase.push_back(L);
1855    NumGVNLoad++;
1856    return true;
1857  }
1858
  // If this load occurs right after a lifetime begin, the loaded value is
  // undefined.
1861  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
1862    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
1863      L->replaceAllUsesWith(UndefValue::get(L->getType()));
1864      toErase.push_back(L);
1865      NumGVNLoad++;
1866      return true;
1867    }
1868  }
1869
1870  return false;
1871}
1872
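/// lookupNumber - Find a value with the given value number that is available
/// in (i.e. dominates) BB, by walking the chain of ValueNumberScopes from BB
/// up the dominator tree.  Returns null if no such value is known.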
1873Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
1874  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
1875  if (I == localAvail.end())
1876    return 0;
1877
1878  ValueNumberScope *Locals = I->second;
1879  while (Locals) {
1880    DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
1881    if (I != Locals->table.end())
1882      return I->second;
1883    Locals = Locals->parent;
1884  }
1885
1886  return 0;
1887}
1888
1889
1890/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
1892bool GVN::processInstruction(Instruction *I,
1893                             SmallVectorImpl<Instruction*> &toErase) {
1894  // Ignore dbg info intrinsics.
1895  if (isa<DbgInfoIntrinsic>(I))
1896    return false;
1897
1898  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1899    bool Changed = processLoad(LI, toErase);
1900
1901    if (!Changed) {
1902      unsigned Num = VN.lookup_or_add(LI);
1903      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
1904    }
1905
1906    return Changed;
1907  }
1908
1909  uint32_t NextNum = VN.getNextUnusedValueNumber();
1910  unsigned Num = VN.lookup_or_add(I);
1911
1912  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
1913    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1914
1915    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
1916      return false;
1917
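    // Propagate the branch condition into successors reachable only through
    // this branch: there the condition is known to be true (resp. false).
    // For example (illustrative IR):
    //   br i1 %c, label %T, label %F
    // makes %c available as 'true' in %T and as 'false' in %F, provided each
    // successor has a single predecessor.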
1918    Value *BranchCond = BI->getCondition();
1919    uint32_t CondVN = VN.lookup_or_add(BranchCond);
1920
1921    BasicBlock *TrueSucc = BI->getSuccessor(0);
1922    BasicBlock *FalseSucc = BI->getSuccessor(1);
1923
1924    if (TrueSucc->getSinglePredecessor())
1925      localAvail[TrueSucc]->table[CondVN] =
1926        ConstantInt::getTrue(TrueSucc->getContext());
1927    if (FalseSucc->getSinglePredecessor())
1928      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());
1930
1931    return false;
1932
  // Allocations and terminators are always uniquely numbered, so we can save
  // time and memory by fast-failing them.
1935  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
1936    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1937    return false;
1938  }
1939
1940  // Collapse PHI nodes
1941  if (PHINode* p = dyn_cast<PHINode>(I)) {
1942    Value *constVal = CollapsePhi(p);
1943
1944    if (constVal) {
1945      p->replaceAllUsesWith(constVal);
1946      if (MD && constVal->getType()->isPointerTy())
1947        MD->invalidateCachedPointerInfo(constVal);
1948      VN.erase(p);
1949
1950      toErase.push_back(p);
1951    } else {
1952      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1953    }
1954
1955  // If the number we were assigned was a brand new VN, then we don't
1956  // need to do a lookup to see if the number already exists
1957  // somewhere in the domtree: it can't!
1958  } else if (Num == NextNum) {
1959    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1960
1961  // Perform fast-path value-number based elimination of values inherited from
1962  // dominators.
1963  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
1964    // Remove it!
1965    VN.erase(I);
1966    I->replaceAllUsesWith(repl);
1967    if (MD && repl->getType()->isPointerTy())
1968      MD->invalidateCachedPointerInfo(repl);
1969    toErase.push_back(I);
1970    return true;
1971
1972  } else {
1973    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1974  }
1975
1976  return false;
1977}
1978
1979/// runOnFunction - This is the main transformation entry point for a function.
1980bool GVN::runOnFunction(Function& F) {
1981  if (!NoLoads)
1982    MD = &getAnalysis<MemoryDependenceAnalysis>();
1983  DT = &getAnalysis<DominatorTree>();
1984  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
1985  VN.setMemDep(MD);
1986  VN.setDomTree(DT);
1987
1988  bool Changed = false;
1989  bool ShouldContinue = true;
1990
1991  // Merge unconditional branches, allowing PRE to catch more
1992  // optimization opportunities.
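  // (E.g. a block whose single predecessor ends in an unconditional branch to
  // it is folded into that predecessor, exposing more redundancies.)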
1993  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
1994    BasicBlock *BB = FI;
1995    ++FI;
1996    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
1997    if (removedBlock) NumGVNBlocks++;
1998
1999    Changed |= removedBlock;
2000  }
2001
2002  unsigned Iteration = 0;
2003
2004  while (ShouldContinue) {
2005    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
2006    ShouldContinue = iterateOnFunction(F);
2007    Changed |= ShouldContinue;
2008    ++Iteration;
2009  }
2010
2011  if (EnablePRE) {
2012    bool PREChanged = true;
2013    while (PREChanged) {
2014      PREChanged = performPRE(F);
2015      Changed |= PREChanged;
2016    }
2017  }
2018  // FIXME: Should perform GVN again after PRE does something.  PRE can move
2019  // computations into blocks where they become fully redundant.  Note that
2020  // we can't do this until PRE's critical edge splitting updates memdep.
2021  // Actually, when this happens, we should just fully integrate PRE into GVN.
2022
2023  cleanupGlobalSets();
2024
2025  return Changed;
2026}
2027
2028
2029bool GVN::processBlock(BasicBlock *BB) {
2030  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
2031  // incrementing BI before processing an instruction).
2032  SmallVector<Instruction*, 8> toErase;
2033  bool ChangedFunction = false;
2034
2035  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2036       BI != BE;) {
2037    ChangedFunction |= processInstruction(BI, toErase);
2038    if (toErase.empty()) {
2039      ++BI;
2040      continue;
2041    }
2042
2043    // If we need some instructions deleted, do it now.
2044    NumGVNInstr += toErase.size();
2045
2046    // Avoid iterator invalidation.
2047    bool AtStart = BI == BB->begin();
2048    if (!AtStart)
2049      --BI;
2050
    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
2052         E = toErase.end(); I != E; ++I) {
2053      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
2054      if (MD) MD->removeInstruction(*I);
2055      (*I)->eraseFromParent();
2056      DEBUG(verifyRemoved(*I));
2057    }
2058    toErase.clear();
2059
2060    if (AtStart)
2061      BI = BB->begin();
2062    else
2063      ++BI;
2064  }
2065
2066  return ChangedFunction;
2067}
2068
2069/// performPRE - Perform a purely local form of PRE that looks for diamond
2070/// control flow patterns and attempts to perform simple PRE at the join point.
2071bool GVN::performPRE(Function &F) {
2072  bool Changed = false;
2073  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
2074  DenseMap<BasicBlock*, Value*> predMap;
2075  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
2076       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
2077    BasicBlock *CurrentBlock = *DI;
2078
2079    // Nothing to PRE in the entry block.
2080    if (CurrentBlock == &F.getEntryBlock()) continue;
2081
2082    for (BasicBlock::iterator BI = CurrentBlock->begin(),
2083         BE = CurrentBlock->end(); BI != BE; ) {
2084      Instruction *CurInst = BI++;
2085
2086      if (isa<AllocaInst>(CurInst) ||
2087          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
2088          CurInst->getType()->isVoidTy() ||
2089          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2090          isa<DbgInfoIntrinsic>(CurInst))
2091        continue;
2092
2093      uint32_t ValNo = VN.lookup(CurInst);
2094
2095      // Look for the predecessors for PRE opportunities.  We're
2096      // only trying to solve the basic diamond case, where
2097      // a value is computed in the successor and one predecessor,
2098      // but not the other.  We also explicitly disallow cases
2099      // where the successor is its own predecessor, because they're
2100      // more complicated to get right.
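      // For example (an illustrative diamond CFG):
      //          A
      //         / \
      //        B   C        the expression is available in B but not in C;
      //         \ /         we clone it into C and merge the two values with
      //          D          a phi at the top of D (CurrentBlock).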
2101      unsigned NumWith = 0;
2102      unsigned NumWithout = 0;
2103      BasicBlock *PREPred = 0;
2104      predMap.clear();
2105
2106      for (pred_iterator PI = pred_begin(CurrentBlock),
2107           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
2108        // We're not interested in PRE where the block is its
2109        // own predecessor, or in blocks with predecessors
2110        // that are not reachable.
2111        if (*PI == CurrentBlock) {
2112          NumWithout = 2;
2113          break;
        } else if (!localAvail.count(*PI)) {
2115          NumWithout = 2;
2116          break;
2117        }
2118
2119        DenseMap<uint32_t, Value*>::iterator predV =
2120                                            localAvail[*PI]->table.find(ValNo);
2121        if (predV == localAvail[*PI]->table.end()) {
2122          PREPred = *PI;
2123          NumWithout++;
2124        } else if (predV->second == CurInst) {
2125          NumWithout = 2;
2126        } else {
2127          predMap[*PI] = predV->second;
2128          NumWith++;
2129        }
2130      }
2131
2132      // Don't do PRE when it might increase code size, i.e. when
2133      // we would need to insert instructions in more than one pred.
2134      if (NumWithout != 1 || NumWith == 0)
2135        continue;
2136
2137      // Don't do PRE across indirect branch.
2138      if (isa<IndirectBrInst>(PREPred->getTerminator()))
2139        continue;
2140
2141      // We can't do PRE safely on a critical edge, so instead we schedule
2142      // the edge to be split and perform the PRE the next time we iterate
2143      // on the function.
2144      unsigned SuccNum = 0;
2145      for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
2146           i != e; ++i)
2147        if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
2148          SuccNum = i;
2149          break;
2150        }
2151
2152      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
2153        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
2154        continue;
2155      }
2156
2157      // Instantiate the expression in the predecessor that lacked it.
2158      // Because we are going top-down through the block, all value numbers
2159      // will be available in the predecessor by the time we need them.  Any
2160      // that weren't originally present will have been instantiated earlier
2161      // in this loop.
2162      Instruction *PREInstr = CurInst->clone();
2163      bool success = true;
2164      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
2165        Value *Op = PREInstr->getOperand(i);
2166        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2167          continue;
2168
2169        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
2170          PREInstr->setOperand(i, V);
2171        } else {
2172          success = false;
2173          break;
2174        }
2175      }
2176
2177      // Fail out if we encounter an operand that is not available in
2178      // the PRE predecessor.  This is typically because of loads which
2179      // are not value numbered precisely.
2180      if (!success) {
2181        delete PREInstr;
2182        DEBUG(verifyRemoved(PREInstr));
2183        continue;
2184      }
2185
2186      PREInstr->insertBefore(PREPred->getTerminator());
2187      PREInstr->setName(CurInst->getName() + ".pre");
2188      predMap[PREPred] = PREInstr;
2189      VN.add(PREInstr, ValNo);
2190      NumGVNPRE++;
2191
2192      // Update the availability map to include the new instruction.
2193      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));
2194
2195      // Create a PHI to make the value available in this block.
2196      PHINode* Phi = PHINode::Create(CurInst->getType(),
2197                                     CurInst->getName() + ".pre-phi",
2198                                     CurrentBlock->begin());
2199      for (pred_iterator PI = pred_begin(CurrentBlock),
2200           PE = pred_end(CurrentBlock); PI != PE; ++PI)
2201        Phi->addIncoming(predMap[*PI], *PI);
2202
2203      VN.add(Phi, ValNo);
2204      localAvail[CurrentBlock]->table[ValNo] = Phi;
2205
2206      CurInst->replaceAllUsesWith(Phi);
2207      if (MD && Phi->getType()->isPointerTy())
2208        MD->invalidateCachedPointerInfo(Phi);
2209      VN.erase(CurInst);
2210
2211      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
2212      if (MD) MD->removeInstruction(CurInst);
2213      CurInst->eraseFromParent();
2214      DEBUG(verifyRemoved(CurInst));
2215      Changed = true;
2216    }
2217  }
2218
2219  for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
2220       I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
2221    SplitCriticalEdge(I->first, I->second, this);
2222
  return Changed || !toSplit.empty();
2224}
2225
2226/// iterateOnFunction - Executes one iteration of GVN
2227bool GVN::iterateOnFunction(Function &F) {
2228  cleanupGlobalSets();
2229
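  // Rebuild the per-block scopes: each block's ValueNumberScope is chained to
  // its immediate dominator's scope, so lookupNumber can search up the
  // dominator tree.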
2230  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
2231       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
2232    if (DI->getIDom())
2233      localAvail[DI->getBlock()] =
2234                   new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
2235    else
2236      localAvail[DI->getBlock()] = new ValueNumberScope(0);
2237  }
2238
2239  // Top-down walk of the dominator tree
2240  bool Changed = false;
2241#if 0
2242  // Needed for value numbering with phi construction to work.
2243  ReversePostOrderTraversal<Function*> RPOT(&F);
2244  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
2245       RE = RPOT.end(); RI != RE; ++RI)
2246    Changed |= processBlock(*RI);
2247#else
2248  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
2249       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
2250    Changed |= processBlock(DI->getBlock());
2251#endif
2252
2253  return Changed;
2254}
2255
2256void GVN::cleanupGlobalSets() {
2257  VN.clear();
2258
2259  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
2260       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
2261    delete I->second;
2262  localAvail.clear();
2263}
2264
2265/// verifyRemoved - Verify that the specified instruction does not occur in our
2266/// internal data structures.
2267void GVN::verifyRemoved(const Instruction *Inst) const {
2268  VN.verifyRemoved(Inst);
2269
2270  // Walk through the value number scope to make sure the instruction isn't
2271  // ferreted away in it.
2272  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
2273         I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
2274    const ValueNumberScope *VNS = I->second;
2275
2276    while (VNS) {
2277      for (DenseMap<uint32_t, Value*>::const_iterator
2278             II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
2279        assert(II->second != Inst && "Inst still in value numbering scope!");
2280      }
2281
2282      VNS = VNS->parent;
2283    }
2284  }
2285}
2286