GVN.cpp revision aad94aa4375d5ed43be728e03d91751c102ff958
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include <list>
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else if (varargs != other.varargs)
        return false;
      return true;
    }

    /*bool operator!=(const Expression &other) const {
      return !(*this == other);
    }*/
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    // Fold the type hash into the opcode hash rather than overwriting it.
    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9)) +
           hash * 37;

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  CallSite CS(C);
  for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;
      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}
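
// Example (illustrative; @f and %x are hypothetical): given a readonly
// function @f and two calls with identical arguments and no intervening
// store,
//
//   %a = call i32 @f(i32 %x)
//   %b = call i32 @f(i32 %x)   ; memdep reports the first call as a Def
//
// the argument-by-argument comparison above gives %b the same value number
// as %a, so the second call can later be eliminated as fully redundant.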

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}
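
// Example (illustrative; the IR names are hypothetical): structurally
// identical instructions produce equal Expression keys and thus share a
// value number,
//
//   %p = add i32 %x, %y    ; Expression{ADD, i32, [VN(%x), VN(%y)]}
//   %q = add i32 %x, %y    ; same Expression -> same number as %p
//
// which is what later lets GVN replace %q with %p when %p dominates %q.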

/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
        : FunctionPass(ID), NoLoads(noloads), MD(0) {
      initializeGVNPass(*PassRegistry::getPassRegistry());
    }

  private:
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const TargetData* TD;

    ValueTable VN;

    /// NumberTable - A mapping from value numbers to lists of Value*'s that
    /// have that value number.  Use lookupNumber to query it.
    DenseMap<uint32_t, std::pair<Value*, void*> > NumberTable;
    BumpPtrAllocator TableAllocator;
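
    // Layout note (illustrative): this is a hand-rolled singly linked list.
    // The head (Value*, next) pair lives inline in the DenseMap entry;
    // later insertions are spliced in directly behind the head using nodes
    // allocated from TableAllocator.  E.g. after inserting V1, V2, V3 under
    // the same number N:
    //
    //   NumberTable[N] = (V1, n2),  n2 = (V3, n1),  n1 = (V2, null)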

    /// insert_table - Push a new Value to the NumberTable onto the list for
    /// its value number.
    void insert_table(uint32_t N, Value *V) {
      std::pair<Value*, void*>& Curr = NumberTable[N];
      if (!Curr.first) {
        Curr.first = V;
        return;
      }

      std::pair<Value*, void*>* Node =
        TableAllocator.Allocate<std::pair<Value*, void*> >();
      Node->first = V;
      Node->second = Curr.second;
      Curr.second = Node;
    }

    /// erase_table - Scan the list of values corresponding to a given value
    /// number, and remove the given value if encountered.
    void erase_table(uint32_t N, Value *V) {
      std::pair<Value*, void*>* Prev = 0;
      std::pair<Value*, void*>* Curr = &NumberTable[N];

      while (Curr->first != V) {
        Prev = Curr;
        Curr = static_cast<std::pair<Value*, void*>*>(Curr->second);
      }

      if (Prev) {
        Prev->second = Curr->second;
      } else {
        if (!Curr->second) {
          Curr->first = 0;
        } else {
          std::pair<Value*, void*>* Next =
            static_cast<std::pair<Value*, void*>*>(Curr->second);
          Curr->first = Next->first;
          Curr->second = Next->second;
        }
      }
    }

    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
      errs() << I->first << "\n";
      I->second->dump();
  }
  errs() << "}\n";
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully available in FullyAvailableBlocks.
/// This map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, bool> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}
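
// Example (illustrative): for the diamond
//
//        A (available)
//       / \
//      B   C (pre-marked unavailable by the caller)
//       \ /
//        D (query block)
//
// the query on D optimistically marks D as 2, proves B available through A,
// then fails on C and lands at SpeculationFailure.  D is still in state 2,
// so it is simply flipped to 0.  Had D already been promoted to 3 (its
// speculative answer used for another block), the worklist above would also
// reset D's transitive successors.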

/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}

/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}
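
// Example (illustrative; %p, %q, %v are hypothetical): forwarding
//
//   store i64 %v, i64* %p
//   %x = load i32* %q        ; %q must-aliases %p
//
// takes the StoreSize > LoadSize path above: on little-endian no shift is
// needed (the low bits already line up; big-endian first emits an lshr by
// 32), and the value is then truncated with 'trunc i64 %v to i32'.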

/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr, StoreOffset,TD);
  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.  One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
    << "Base       = " << *StoreBase << "\n"
    << "Store Ptr  = " << *WritePtr << "\n"
    << "Store Offs = " << StoreOffset << "\n"
    << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;

  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
    << "Base       = " << *StoreBase << "\n"
    << "Store Ptr  = " << *WritePtr << "\n"
    << "Store Offs = " << StoreOffset << "\n"
    << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
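
// Example (illustrative): if %p and %q resolve to the same base,
//
//   store i64 %v, i64* %p    ; base+0, 8 bytes
//   %x = load i32* %q        ; base+4, 4 bytes
//
// the load's bytes [4,8) are contained in the store's [0,8), so the
// function returns byte offset 4 into the stored value.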

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getValueOperand()->getType()->isStructTy() ||
      DepSI->getValueOperand()->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize =TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src));
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}
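
// Example (illustrative; @G and %dst are hypothetical): for a load
// clobbered by a memcpy out of a constant global,
//
//   @G = constant [8 x i8] c"abcdefgh"
//   memcpy(%dst, @G, 8)
//   %x = load i8* %p         ; %p == %dst + 2
//
// the offset within the transfer is 2, the constant fold check above
// succeeds, and GetMemInstValueForLoad can later read 'c' out of @G.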

/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}
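
// Example (illustrative) of the shift math above: an 8-byte store feeding
// a 4-byte load at Offset 4 has StoreSize = 8 and LoadSize = 4, so
// ShiftAmt = 4*8 = 32 on little-endian and (8-4-4)*8 = 0 on big-endian;
// the shifted value is then truncated to a 32-bit integer.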

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // See if we can constant fold a load from the constant with the offset
  // applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}
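
// Example (illustrative) of the splat loop above: materializing a 4-byte
// load from memset(%p, i8 %c, n) doubles the set bytes each round,
//
//   %v1 = zext i8 %c to i32           ; 0x000000CC
//   %s1 = shl i32 %v1, 8
//   %v2 = or i32 %v1, %s1             ; 0x0000CCCC
//   %s2 = shl i32 %v2, 16
//   %v4 = or i32 %v2, %s2             ; 0xCCCCCCCC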

namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

}

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}
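
// Example (illustrative; the labels are hypothetical): if both
// predecessors of %bb store to the load's address,
//
//   pred1: store i32 %a, i32* %p      pred2: store i32 %b, i32* %p
//   bb:    %x = load i32* %p
//
// ValuesPerBlock holds {pred1 -> %a, pred2 -> %b}, and SSAUpdater emits
//   %x.phi = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
// which replaces the load.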

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  AliasAnalysis::Location Loc = VN.getAliasAnalysis()->getLocation(LI);
  MD->getNonLocalPointerDependency(Loc, true, LI->getParent(), Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same as
      // the pointer operand of the load if PHI translation occurs.  Make sure
      // to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from the
      // stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                       DepSI->getValueOperand(),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getValueOperand()->getType() != LI->getType()) {
        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                         S->getValueOperand()));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(LI);
    toErase.push_back(LI);
    ++NumGVNLoad;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

1466  // Okay, we have *some* definitions of the value.  This means that the value
1467  // is available in some of our (transitive) predecessors.  Lets think about
1468  // doing PRE of this load.  This will involve inserting a new load into the
1469  // predecessor when it's not available.  We could do this in general, but
1470  // prefer to not increase code size.  As such, we only do this when we know
1471  // that we only have to insert *one* load (which means we're basically moving
1472  // the load, not inserting a new one).
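  // For example (illustrative CFG; names are hypothetical):
  //   BB1: store i32 0, i32* %P        BB2: (%P not available)
  //      \                               /
  //   BB3: %v = load i32* %P
  // Here we insert a single reload of %P at the end of BB2 and then merge it
  // with the stored value via a phi in BB3.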

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor, walking
  // backwards through single-predecessor blocks if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated.  Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;
  }
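  // The check above rejects, for example (illustrative CFG):
  //
  //      TmpBB
  //      /   \
  //  LoadBB   BBx
  //
  // where the edge TmpBB -> LoadBB is critical: hoisting the load above
  // TmpBB would also run it on the path into BBx, where it never executed
  // before.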

  assert(TmpBB);
  LoadBB = TmpBB;

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // The value is "hot": the load dominates the instruction that
        // produces its value, which can only happen inside a loop.
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are only interested in "hot" loads; anything else risks being a
    // pessimization.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
    }
  }
  if (!NeedToSplit.empty()) {
    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
    return false;
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary.  The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
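    // For example (illustrative IR), if the pointer in LoadBB is itself
    //   %a = phi i8* [ %p, %Pred1 ], [ %q, %Pred2 ]
    // then phi translation of %a into %Pred1 yields %p, and the reload
    // inserted there loads directly from %p.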
    PHITransAddr Address(LI->getPointerOperand(), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here.  We have to watch out for:
    //  %1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load %1
    // It is valid to have the getelementptr before the test, even if p can be
    // 0, as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case.  Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors.
  // Do it now.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Instruction *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                        LI->getAlignment(),
                                        UnavailablePred->getTerminator());

    // Transfer the old load's TBAA tag to the new load.
    if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa))
      NewLoad->setMetadata(LLVMContext::MD_tbaa, Tag);

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  VN.erase(LI);
  toErase.push_back(LI);
  ++NumPRELoad;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // Find this load's local memory dependency.
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We could forward the stored value if we can recognize that the clobber
    // writes to a common base plus a constant offset, and that the previous
    // store (or memset) completely covers this load.  This sort of thing can
    // happen in bitfield access code.
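    // For example, on a little-endian target the %C above reads byte 1 of the
    // stored i32, so the forwarded value could (hypothetically) be
    // materialized as:
    //   %tmp = lshr i32 123, 8
    //   %C = trunc i32 %tmp to i8
    // which folds to 0, since 123 fits entirely in the low byte.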
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (TD) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (TD) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(),
                                            L, *TD);
      }
    }

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
            << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }

    DEBUG(
      // Fast-print the load as an operand; printing the whole instruction
      // with operator<< would be too slow.
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getValueOperand();

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    if (StoredVal->getType() != L->getType()) {
      if (TD) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    if (DepLI->getType() != L->getType()) {
      if (TD) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (AvailableVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(AvailableVal);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading from a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load occurs right after a lifetime begin, then the loaded value
  // is undefined.
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }
  }

  return false;
}

// lookupNumber - In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that number,
// and then scan the list to find one whose block dominates the block in
// question.  This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
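// For illustration, the data reachable from NumberTable[num] is a singly
// linked list of (Value*, next) pairs carved out of TableAllocator:
//
//   NumberTable[num] -> (V0, next) -> (V1, next) -> ... -> (Vn, null)
//
// so each lookup below is a linear scan with one dominance query per entry.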
Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  std::pair<Value*, void*> Vals = NumberTable[num];
  if (!Vals.first) return 0;
  Instruction *Inst = dyn_cast<Instruction>(Vals.first);
  if (!Inst) return Vals.first;
  BasicBlock *Parent = Inst->getParent();
  if (DT->dominates(Parent, BB))
    return Inst;

  std::pair<Value*, void*>* Next =
    static_cast<std::pair<Value*, void*>*>(Vals.second);
  while (Next) {
    Instruction *CurrInst = dyn_cast<Instruction>(Next->first);
    if (!CurrInst) return Next->first;

    BasicBlock *Parent = CurrInst->getParent();
    if (DT->dominates(Parent, BB))
      return CurrInst;

    Next = static_cast<std::pair<Value*, void*>*>(Next->second);
  }

  return 0;
}


/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  // If the instruction can be easily simplified then do so now in preference
  // to value numbering it.  Value numbering often exposes redundancies, for
  // example if it determines that %y is equal to %x then the instruction
  // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
  if (Value *V = SimplifyInstruction(I, TD, DT)) {
    I->replaceAllUsesWith(V);
    if (MD && V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(I);
    toErase.push_back(I);
    return true;
  }

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      insert_table(Num, LI);
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  // Allocations, terminators and PHI nodes are always uniquely numbered, so
  // we can save time and memory by fast-failing them here.
  if (isa<AllocaInst>(I) || isa<TerminatorInst>(I) || isa<PHINode>(I)) {
    insert_table(Num, I);
    return false;
  }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  if (Num == NextNum) {
    insert_table(Num, I);
    return false;
  }

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  Value *repl = lookupNumber(I->getParent(), Num);
  if (repl == 0) {
    // Failure, just remember this instance for future use.
    insert_table(Num, I);
    return false;
  }

  // Remove it!
  VN.erase(I);
  I->replaceAllUsesWith(repl);
  if (MD && repl->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(repl);
  toErase.push_back(I);
  return true;
}

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function &F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  TD = getAnalysisIfAvailable<TargetData>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}


bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVector<Instruction*, 8>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
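// For example (illustrative IR; names are hypothetical):
//
//              BB1
//             /   \
//   BB2: %a = add i32 %x, %y    BB3: (no add)
//             \   /
//   BB4: %b = add i32 %x, %y
//
// The add is available from BB2 but not BB3, so PRE clones it into BB3 and
// replaces %b with a phi of the two copies.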
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      // We don't currently value number ANY inline asm calls.
      if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
        if (CallI->isInlineAsm())
          continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look at the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (P == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!DT->dominates(&F.getEntryBlock(), P)) {
          NumWithout = 2;
          break;
        }

        Value* predV = lookupNumber(P, ValNo);
        if (predV == 0) {
          PREPred = P;
          ++NumWithout;
        } else if (predV == CurInst) {
          NumWithout = 2;
        } else {
          predMap[P] = predV;
          ++NumWith;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      ++NumGVNPRE;

      // Update the availability map to include the new instruction.
      insert_table(ValNo, PREInstr);

      // Create a PHI to make the value available in this block.
      PHINode* Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        Phi->addIncoming(predMap[P], P);
      }

      VN.add(Phi, ValNo);
      insert_table(ValNo, Phi);

      CurInst->replaceAllUsesWith(Phi);
      if (MD && Phi->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);
      erase_table(ValNo, CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
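// A critical edge runs from a block with multiple successors to a block with
// multiple predecessors, e.g. the edge BB1 -> BB3 in (names hypothetical):
//
//   BB1: br i1 %c, label %BB2, label %BB3    ; BB1 has two successors
//   BB3:                                     ; BB3 is also reached from BB4
//
// Splitting inserts a fresh block along the edge so that newly placed code
// runs only on the BB1 -> BB3 path.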
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}

/// iterateOnFunction - Executes one iteration of GVN
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Top-down walk of the dominator tree
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();
  NumberTable.clear();
  TableAllocator.Reset();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<uint32_t, std::pair<Value*, void*> >::const_iterator
       I = NumberTable.begin(), E = NumberTable.end(); I != E; ++I) {
    const std::pair<Value*, void*> *Node = &I->second;
    assert(Node->first != Inst && "Inst still in value numbering scope!");

    while (Node->second) {
      Node = static_cast<std::pair<Value*, void*>*>(Node->second);
      assert(Node->first != Inst && "Inst still in value numbering scope!");
    }
  }
}