GVN.cpp revision 96f1d8ebdd33b3f9bdb3b1163f36072c68599f42
1//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This pass performs global value numbering to eliminate fully redundant
11// instructions.  It also performs simple dead load elimination.
12//
13// Note that this pass does the value numbering itself; it does not use the
14// ValueNumbering analysis passes.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "gvn"
19#include "llvm/Transforms/Scalar.h"
20#include "llvm/BasicBlock.h"
21#include "llvm/Constants.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/GlobalVariable.h"
24#include "llvm/Function.h"
25#include "llvm/IntrinsicInst.h"
26#include "llvm/LLVMContext.h"
27#include "llvm/Operator.h"
28#include "llvm/Value.h"
29#include "llvm/ADT/DenseMap.h"
30#include "llvm/ADT/DepthFirstIterator.h"
31#include "llvm/ADT/PostOrderIterator.h"
32#include "llvm/ADT/SmallPtrSet.h"
33#include "llvm/ADT/SmallVector.h"
34#include "llvm/ADT/Statistic.h"
35#include "llvm/Analysis/AliasAnalysis.h"
36#include "llvm/Analysis/ConstantFolding.h"
37#include "llvm/Analysis/Dominators.h"
38#include "llvm/Analysis/Loads.h"
39#include "llvm/Analysis/MemoryBuiltins.h"
40#include "llvm/Analysis/MemoryDependenceAnalysis.h"
41#include "llvm/Analysis/PHITransAddr.h"
42#include "llvm/Support/CFG.h"
43#include "llvm/Support/CommandLine.h"
44#include "llvm/Support/Debug.h"
45#include "llvm/Support/ErrorHandling.h"
46#include "llvm/Support/GetElementPtrTypeIterator.h"
47#include "llvm/Support/IRBuilder.h"
48#include "llvm/Support/raw_ostream.h"
49#include "llvm/Target/TargetData.h"
50#include "llvm/Transforms/Utils/BasicBlockUtils.h"
51#include "llvm/Transforms/Utils/Local.h"
52#include "llvm/Transforms/Utils/SSAUpdater.h"
53using namespace llvm;
54
55STATISTIC(NumGVNInstr,  "Number of instructions deleted");
56STATISTIC(NumGVNLoad,   "Number of loads deleted");
57STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
58STATISTIC(NumGVNBlocks, "Number of blocks merged");
59STATISTIC(NumPRELoad,   "Number of loads PRE'd");
60
61static cl::opt<bool> EnablePRE("enable-pre",
62                               cl::init(true), cl::Hidden);
63static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
64static cl::opt<bool> EnableFullLoadPRE("enable-full-load-pre", cl::init(false));
65
66//===----------------------------------------------------------------------===//
67//                         ValueTable Class
68//===----------------------------------------------------------------------===//
69
70/// This class holds the mapping between values and value numbers.  It is used
71/// as an efficient mechanism to determine the expression-wise equivalence of
72/// two values.
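///
/// Illustrative example (the IR names below are hypothetical): given
///   %x = add i32 %a, %b
///   %y = add i32 %a, %b
/// lookup_or_add(%x) and lookup_or_add(%y) return the same value number, which
/// is what lets GVN treat %y as fully redundant with %x.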
73namespace {
74  struct Expression {
75    enum ExpressionOpcode {
76      ADD = Instruction::Add,
77      FADD = Instruction::FAdd,
78      SUB = Instruction::Sub,
79      FSUB = Instruction::FSub,
80      MUL = Instruction::Mul,
81      FMUL = Instruction::FMul,
82      UDIV = Instruction::UDiv,
83      SDIV = Instruction::SDiv,
84      FDIV = Instruction::FDiv,
85      UREM = Instruction::URem,
86      SREM = Instruction::SRem,
87      FREM = Instruction::FRem,
88      SHL = Instruction::Shl,
89      LSHR = Instruction::LShr,
90      ASHR = Instruction::AShr,
91      AND = Instruction::And,
92      OR = Instruction::Or,
93      XOR = Instruction::Xor,
94      TRUNC = Instruction::Trunc,
95      ZEXT = Instruction::ZExt,
96      SEXT = Instruction::SExt,
97      FPTOUI = Instruction::FPToUI,
98      FPTOSI = Instruction::FPToSI,
99      UITOFP = Instruction::UIToFP,
100      SITOFP = Instruction::SIToFP,
101      FPTRUNC = Instruction::FPTrunc,
102      FPEXT = Instruction::FPExt,
103      PTRTOINT = Instruction::PtrToInt,
104      INTTOPTR = Instruction::IntToPtr,
105      BITCAST = Instruction::BitCast,
106      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
107      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
108      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
109      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
110      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
111      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
112      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };
113
114    ExpressionOpcode opcode;
115    const Type* type;
116    SmallVector<uint32_t, 4> varargs;
117    Value *function;
118
119    Expression() { }
120    Expression(ExpressionOpcode o) : opcode(o) { }
121
122    bool operator==(const Expression &other) const {
123      if (opcode != other.opcode)
124        return false;
125      else if (opcode == EMPTY || opcode == TOMBSTONE)
126        return true;
127      else if (type != other.type)
128        return false;
129      else if (function != other.function)
130        return false;
131      else {
132        if (varargs.size() != other.varargs.size())
133          return false;
134
135        for (size_t i = 0; i < varargs.size(); ++i)
136          if (varargs[i] != other.varargs[i])
137            return false;
138
139        return true;
140      }
141    }
142
143    bool operator!=(const Expression &other) const {
144      return !(*this == other);
145    }
146  };
147
148  class ValueTable {
149    private:
150      DenseMap<Value*, uint32_t> valueNumbering;
151      DenseMap<Expression, uint32_t> expressionNumbering;
152      AliasAnalysis* AA;
153      MemoryDependenceAnalysis* MD;
154      DominatorTree* DT;
155
156      uint32_t nextValueNumber;
157
158      Expression::ExpressionOpcode getOpcode(CmpInst* C);
159      Expression create_expression(BinaryOperator* BO);
160      Expression create_expression(CmpInst* C);
161      Expression create_expression(ShuffleVectorInst* V);
162      Expression create_expression(ExtractElementInst* C);
163      Expression create_expression(InsertElementInst* V);
164      Expression create_expression(SelectInst* V);
165      Expression create_expression(CastInst* C);
166      Expression create_expression(GetElementPtrInst* G);
167      Expression create_expression(CallInst* C);
168      Expression create_expression(Constant* C);
169      Expression create_expression(ExtractValueInst* C);
170      Expression create_expression(InsertValueInst* C);
171
172      uint32_t lookup_or_add_call(CallInst* C);
173    public:
174      ValueTable() : nextValueNumber(1) { }
175      uint32_t lookup_or_add(Value *V);
176      uint32_t lookup(Value *V) const;
177      void add(Value *V, uint32_t num);
178      void clear();
179      void erase(Value *v);
180      unsigned size();
181      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
182      AliasAnalysis *getAliasAnalysis() const { return AA; }
183      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
184      void setDomTree(DominatorTree* D) { DT = D; }
185      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
186      void verifyRemoved(const Value *) const;
187  };
188}
189
190namespace llvm {
191template <> struct DenseMapInfo<Expression> {
192  static inline Expression getEmptyKey() {
193    return Expression(Expression::EMPTY);
194  }
195
196  static inline Expression getTombstoneKey() {
197    return Expression(Expression::TOMBSTONE);
198  }
199
200  static unsigned getHashValue(const Expression e) {
201    unsigned hash = e.opcode;
202
203    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
204            (unsigned)((uintptr_t)e.type >> 9)) + hash * 37;
205
206    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
207         E = e.varargs.end(); I != E; ++I)
208      hash = *I + hash * 37;
209
210    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
211            (unsigned)((uintptr_t)e.function >> 9)) +
212           hash * 37;
213
214    return hash;
215  }
216  static bool isEqual(const Expression &LHS, const Expression &RHS) {
217    return LHS == RHS;
218  }
219};
220
221template <>
222struct isPodLike<Expression> { static const bool value = true; };
223
224}
225
226//===----------------------------------------------------------------------===//
227//                     ValueTable Internal Functions
228//===----------------------------------------------------------------------===//
229
230Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
231  if (isa<ICmpInst>(C)) {
232    switch (C->getPredicate()) {
233    default:  // THIS SHOULD NEVER HAPPEN
234      llvm_unreachable("Comparison with unknown predicate?");
235    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
236    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
237    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
238    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
239    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
240    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
241    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
242    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
243    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
244    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
245    }
246  } else {
247    switch (C->getPredicate()) {
248    default: // THIS SHOULD NEVER HAPPEN
249      llvm_unreachable("Comparison with unknown predicate?");
250    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
251    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
252    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
253    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
254    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
255    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
256    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
257    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
258    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
259    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
260    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
261    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
262    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
263    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
264    }
265  }
266}
267
268Expression ValueTable::create_expression(CallInst* C) {
269  Expression e;
270
271  e.type = C->getType();
272  e.function = C->getCalledFunction();
273  e.opcode = Expression::CALL;
274
275  CallSite CS(C);
276  for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
277       I != E; ++I)
278    e.varargs.push_back(lookup_or_add(*I));
279
280  return e;
281}
282
283Expression ValueTable::create_expression(BinaryOperator* BO) {
284  Expression e;
285  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
286  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
287  e.function = 0;
288  e.type = BO->getType();
289  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());
290
291  return e;
292}
293
294Expression ValueTable::create_expression(CmpInst* C) {
295  Expression e;
296
297  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
298  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
299  e.function = 0;
300  e.type = C->getType();
301  e.opcode = getOpcode(C);
302
303  return e;
304}
305
306Expression ValueTable::create_expression(CastInst* C) {
307  Expression e;
308
309  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
310  e.function = 0;
311  e.type = C->getType();
312  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());
313
314  return e;
315}
316
317Expression ValueTable::create_expression(ShuffleVectorInst* S) {
318  Expression e;
319
320  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
321  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
322  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
323  e.function = 0;
324  e.type = S->getType();
325  e.opcode = Expression::SHUFFLE;
326
327  return e;
328}
329
330Expression ValueTable::create_expression(ExtractElementInst* E) {
331  Expression e;
332
333  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
334  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
335  e.function = 0;
336  e.type = E->getType();
337  e.opcode = Expression::EXTRACT;
338
339  return e;
340}
341
342Expression ValueTable::create_expression(InsertElementInst* I) {
343  Expression e;
344
345  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
346  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
347  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
348  e.function = 0;
349  e.type = I->getType();
350  e.opcode = Expression::INSERT;
351
352  return e;
353}
354
355Expression ValueTable::create_expression(SelectInst* I) {
356  Expression e;
357
358  e.varargs.push_back(lookup_or_add(I->getCondition()));
359  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
360  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
361  e.function = 0;
362  e.type = I->getType();
363  e.opcode = Expression::SELECT;
364
365  return e;
366}
367
368Expression ValueTable::create_expression(GetElementPtrInst* G) {
369  Expression e;
370
371  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
372  e.function = 0;
373  e.type = G->getType();
374  e.opcode = Expression::GEP;
375
376  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
377       I != E; ++I)
378    e.varargs.push_back(lookup_or_add(*I));
379
380  return e;
381}
382
383Expression ValueTable::create_expression(ExtractValueInst* E) {
384  Expression e;
385
386  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
387  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
388       II != IE; ++II)
389    e.varargs.push_back(*II);
390  e.function = 0;
391  e.type = E->getType();
392  e.opcode = Expression::EXTRACTVALUE;
393
394  return e;
395}
396
397Expression ValueTable::create_expression(InsertValueInst* E) {
398  Expression e;
399
400  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
401  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
402  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
403       II != IE; ++II)
404    e.varargs.push_back(*II);
405  e.function = 0;
406  e.type = E->getType();
407  e.opcode = Expression::INSERTVALUE;
408
409  return e;
410}
411
412//===----------------------------------------------------------------------===//
413//                     ValueTable External Functions
414//===----------------------------------------------------------------------===//
415
416/// add - Insert a value into the table with a specified value number.
417void ValueTable::add(Value *V, uint32_t num) {
418  valueNumbering.insert(std::make_pair(V, num));
419}
420
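/// lookup_or_add_call - Value number a call instruction.  Calls that do not
/// access memory are numbered purely by their expression; calls that only
/// read memory additionally require an identical, non-clobbered dominating
/// call (found via MemoryDependenceAnalysis) whose arguments have matching
/// value numbers; all other calls always receive a fresh value number.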
421uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
422  if (AA->doesNotAccessMemory(C)) {
423    Expression exp = create_expression(C);
424    uint32_t& e = expressionNumbering[exp];
425    if (!e) e = nextValueNumber++;
426    valueNumbering[C] = e;
427    return e;
428  } else if (AA->onlyReadsMemory(C)) {
429    Expression exp = create_expression(C);
430    uint32_t& e = expressionNumbering[exp];
431    if (!e) {
432      e = nextValueNumber++;
433      valueNumbering[C] = e;
434      return e;
435    }
436    if (!MD) {
437      e = nextValueNumber++;
438      valueNumbering[C] = e;
439      return e;
440    }
441
442    MemDepResult local_dep = MD->getDependency(C);
443
444    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
445      valueNumbering[C] =  nextValueNumber;
446      return nextValueNumber++;
447    }
448
449    if (local_dep.isDef()) {
450      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());
451
452      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
453        valueNumbering[C] = nextValueNumber;
454        return nextValueNumber++;
455      }
456
457      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
458        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
459        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
460        if (c_vn != cd_vn) {
461          valueNumbering[C] = nextValueNumber;
462          return nextValueNumber++;
463        }
464      }
465
466      uint32_t v = lookup_or_add(local_cdep);
467      valueNumbering[C] = v;
468      return v;
469    }
470
471    // Non-local case.
472    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
473      MD->getNonLocalCallDependency(CallSite(C));
474    // FIXME: call/call dependencies for readonly calls should return def, not
475    // clobber!  Move the checking logic to MemDep!
476    CallInst* cdep = 0;
477
478    // Check to see if we have a single dominating call instruction that is
479    // identical to C.
480    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
481      const NonLocalDepEntry *I = &deps[i];
482      // Ignore non-local dependencies.
483      if (I->getResult().isNonLocal())
484        continue;
485
486      // We don't handle non-dependencies.  If we already have a call, reject
487      // instruction dependencies.
488      if (I->getResult().isClobber() || cdep != 0) {
489        cdep = 0;
490        break;
491      }
492
493      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
494      // FIXME: All duplicated with non-local case.
495      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
496        cdep = NonLocalDepCall;
497        continue;
498      }
499
500      cdep = 0;
501      break;
502    }
503
504    if (!cdep) {
505      valueNumbering[C] = nextValueNumber;
506      return nextValueNumber++;
507    }
508
509    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
510      valueNumbering[C] = nextValueNumber;
511      return nextValueNumber++;
512    }
513    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
514      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
515      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
516      if (c_vn != cd_vn) {
517        valueNumbering[C] = nextValueNumber;
518        return nextValueNumber++;
519      }
520    }
521
522    uint32_t v = lookup_or_add(cdep);
523    valueNumbering[C] = v;
524    return v;
525
526  } else {
527    valueNumbering[C] = nextValueNumber;
528    return nextValueNumber++;
529  }
530}
531
532/// lookup_or_add - Returns the value number for the specified value, assigning
533/// it a new number if it did not have one before.
534uint32_t ValueTable::lookup_or_add(Value *V) {
535  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
536  if (VI != valueNumbering.end())
537    return VI->second;
538
539  if (!isa<Instruction>(V)) {
540    valueNumbering[V] = nextValueNumber;
541    return nextValueNumber++;
542  }
543
544  Instruction* I = cast<Instruction>(V);
545  Expression exp;
546  switch (I->getOpcode()) {
547    case Instruction::Call:
548      return lookup_or_add_call(cast<CallInst>(I));
549    case Instruction::Add:
550    case Instruction::FAdd:
551    case Instruction::Sub:
552    case Instruction::FSub:
553    case Instruction::Mul:
554    case Instruction::FMul:
555    case Instruction::UDiv:
556    case Instruction::SDiv:
557    case Instruction::FDiv:
558    case Instruction::URem:
559    case Instruction::SRem:
560    case Instruction::FRem:
561    case Instruction::Shl:
562    case Instruction::LShr:
563    case Instruction::AShr:
564    case Instruction::And:
565    case Instruction::Or :
566    case Instruction::Xor:
567      exp = create_expression(cast<BinaryOperator>(I));
568      break;
569    case Instruction::ICmp:
570    case Instruction::FCmp:
571      exp = create_expression(cast<CmpInst>(I));
572      break;
573    case Instruction::Trunc:
574    case Instruction::ZExt:
575    case Instruction::SExt:
576    case Instruction::FPToUI:
577    case Instruction::FPToSI:
578    case Instruction::UIToFP:
579    case Instruction::SIToFP:
580    case Instruction::FPTrunc:
581    case Instruction::FPExt:
582    case Instruction::PtrToInt:
583    case Instruction::IntToPtr:
584    case Instruction::BitCast:
585      exp = create_expression(cast<CastInst>(I));
586      break;
587    case Instruction::Select:
588      exp = create_expression(cast<SelectInst>(I));
589      break;
590    case Instruction::ExtractElement:
591      exp = create_expression(cast<ExtractElementInst>(I));
592      break;
593    case Instruction::InsertElement:
594      exp = create_expression(cast<InsertElementInst>(I));
595      break;
596    case Instruction::ShuffleVector:
597      exp = create_expression(cast<ShuffleVectorInst>(I));
598      break;
599    case Instruction::ExtractValue:
600      exp = create_expression(cast<ExtractValueInst>(I));
601      break;
602    case Instruction::InsertValue:
603      exp = create_expression(cast<InsertValueInst>(I));
604      break;
605    case Instruction::GetElementPtr:
606      exp = create_expression(cast<GetElementPtrInst>(I));
607      break;
608    default:
609      valueNumbering[V] = nextValueNumber;
610      return nextValueNumber++;
611  }
612
613  uint32_t& e = expressionNumbering[exp];
614  if (!e) e = nextValueNumber++;
615  valueNumbering[V] = e;
616  return e;
617}
618
619/// lookup - Returns the value number of the specified value. Fails if
620/// the value has not yet been numbered.
621uint32_t ValueTable::lookup(Value *V) const {
622  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
623  assert(VI != valueNumbering.end() && "Value not numbered?");
624  return VI->second;
625}
626
627/// clear - Remove all entries from the ValueTable
628void ValueTable::clear() {
629  valueNumbering.clear();
630  expressionNumbering.clear();
631  nextValueNumber = 1;
632}
633
634/// erase - Remove a value from the value numbering
635void ValueTable::erase(Value *V) {
636  valueNumbering.erase(V);
637}
638
639/// verifyRemoved - Verify that the value is removed from all internal data
640/// structures.
641void ValueTable::verifyRemoved(const Value *V) const {
642  for (DenseMap<Value*, uint32_t>::const_iterator
643         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
644    assert(I->first != V && "Inst still occurs in value numbering map!");
645  }
646}
647
648//===----------------------------------------------------------------------===//
649//                                GVN Pass
650//===----------------------------------------------------------------------===//
651
652namespace {
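  /// ValueNumberScope - Per-block table mapping value numbers to the Value
  /// that is available in that block, chained to a parent scope so that
  /// lookups can also consult enclosing (dominating) scopes.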
653  struct ValueNumberScope {
654    ValueNumberScope* parent;
655    DenseMap<uint32_t, Value*> table;
656
657    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
658  };
659}
660
661namespace {
662
663  class GVN : public FunctionPass {
664    bool runOnFunction(Function &F);
665  public:
666    static char ID; // Pass identification, replacement for typeid
667    explicit GVN(bool noloads = false)
668      : FunctionPass(&ID), NoLoads(noloads), MD(0) { }
669
670  private:
671    bool NoLoads;
672    MemoryDependenceAnalysis *MD;
673    DominatorTree *DT;
674
675    ValueTable VN;
676    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;
677
678    // List of critical edges to be split between iterations.
679    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
680
681    // This transformation requires dominator info
682    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
683      AU.addRequired<DominatorTree>();
684      if (!NoLoads)
685        AU.addRequired<MemoryDependenceAnalysis>();
686      AU.addRequired<AliasAnalysis>();
687
688      AU.addPreserved<DominatorTree>();
689      AU.addPreserved<AliasAnalysis>();
690    }
691
692    // Helper functions
693    // FIXME: eliminate or document these better
694    bool processLoad(LoadInst* L,
695                     SmallVectorImpl<Instruction*> &toErase);
696    bool processInstruction(Instruction *I,
697                            SmallVectorImpl<Instruction*> &toErase);
698    bool processNonLocalLoad(LoadInst* L,
699                             SmallVectorImpl<Instruction*> &toErase);
700    bool processBlock(BasicBlock *BB);
701    void dump(DenseMap<uint32_t, Value*>& d);
702    bool iterateOnFunction(Function &F);
703    Value *CollapsePhi(PHINode* p);
704    bool performPRE(Function& F);
705    Value *lookupNumber(BasicBlock *BB, uint32_t num);
706    void cleanupGlobalSets();
707    void verifyRemoved(const Instruction *I) const;
708    bool splitCriticalEdges();
709  };
710
711  char GVN::ID = 0;
712}
713
714// createGVNPass - The public interface to this file...
715FunctionPass *llvm::createGVNPass(bool NoLoads) {
716  return new GVN(NoLoads);
717}
718
719INITIALIZE_PASS(GVN, "gvn", "Global Value Numbering", false, false);
720
721void GVN::dump(DenseMap<uint32_t, Value*>& d) {
722  errs() << "{\n";
723  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
724       E = d.end(); I != E; ++I) {
725      errs() << I->first << "\n";
726      I->second->dump();
727  }
728  errs() << "}\n";
729}
730
731static bool isSafeReplacement(PHINode* p, Instruction *inst) {
732  if (!isa<PHINode>(inst))
733    return true;
734
735  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
736       UI != E; ++UI)
737    if (PHINode* use_phi = dyn_cast<PHINode>(*UI))
738      if (use_phi->getParent() == inst->getParent())
739        return false;
740
741  return true;
742}
743
744Value *GVN::CollapsePhi(PHINode *PN) {
745  Value *ConstVal = PN->hasConstantValue(DT);
746  if (!ConstVal) return 0;
747
748  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
749  if (!Inst)
750    return ConstVal;
751
752  if (DT->dominates(Inst, PN))
753    if (isSafeReplacement(PN, Inst))
754      return Inst;
755  return 0;
756}
757
758/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
759/// we're analyzing is fully available in the specified block.  As we go, keep
760/// track of which blocks we know are fully available in FullyAvailableBlocks.  This
761/// map records one of the following four states for each block:
762///   0) we know the block *is not* fully available.
763///   1) we know the block *is* fully available.
764///   2) we do not know whether the block is fully available or not, but we are
765///      currently speculating that it will be.
766///   3) we are speculating for this block and have used that to speculate for
767///      other blocks.
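///
/// Illustrative example: for a diamond CFG where both predecessors of the
/// merge block are known to be fully available, the merge block is marked 1;
/// if speculation later fails, every block whose availability was derived from
/// that speculation is walked and reset to 0.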
768static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
769                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
770  // Optimistically assume that the block is fully available and check to see
771  // if we already know about this block in one lookup.
772  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
773    FullyAvailableBlocks.insert(std::make_pair(BB, 2));
774
775  // If the entry already existed for this block, return the precomputed value.
776  if (!IV.second) {
777    // If this is a speculative "available" value, mark it as being used for
778    // speculation of other blocks.
779    if (IV.first->second == 2)
780      IV.first->second = 3;
781    return IV.first->second != 0;
782  }
783
784  // Otherwise, see if it is fully available in all predecessors.
785  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
786
787  // If this block has no predecessors, it isn't live-in here.
788  if (PI == PE)
789    goto SpeculationFailure;
790
791  for (; PI != PE; ++PI)
792    // If the value isn't fully available in one of our predecessors, then it
793    // isn't fully available in this block either.  Undo our previous
794    // optimistic assumption and bail out.
795    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
796      goto SpeculationFailure;
797
798  return true;
799
800// SpeculationFailure - If we get here, we found out that this is not, after
801// all, a fully-available block.  We have a problem if we speculated on this and
802// used the speculation to mark other blocks as available.
803SpeculationFailure:
804  char &BBVal = FullyAvailableBlocks[BB];
805
806  // If we didn't speculate on this, just return with it set to false.
807  if (BBVal == 2) {
808    BBVal = 0;
809    return false;
810  }
811
812  // If we did speculate on this value, we could have blocks set to 1 that are
813  // incorrect.  Walk the (transitive) successors of this block and mark them as
814  // 0 if set to one.
815  SmallVector<BasicBlock*, 32> BBWorklist;
816  BBWorklist.push_back(BB);
817
818  do {
819    BasicBlock *Entry = BBWorklist.pop_back_val();
820    // Note that this sets blocks to 0 (unavailable) if they happen to not
821    // already be in FullyAvailableBlocks.  This is safe.
822    char &EntryVal = FullyAvailableBlocks[Entry];
823    if (EntryVal == 0) continue;  // Already unavailable.
824
825    // Mark as unavailable.
826    EntryVal = 0;
827
828    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
829      BBWorklist.push_back(*I);
830  } while (!BBWorklist.empty());
831
832  return false;
833}
834
835
836/// CanCoerceMustAliasedValueToLoad - Return true if
837/// CoerceAvailableValueToLoadType will succeed.
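/// (Illustrative: an i64 store can feed an i32 load since the store is at
/// least as large, but an i16 store cannot feed an i32 load, and first-class
/// struct or array values are never coerced.)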
838static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
839                                            const Type *LoadTy,
840                                            const TargetData &TD) {
841  // If the loaded or stored value is a first-class array or struct, don't try
842  // to transform them.  We need to be able to bitcast to integer.
843  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
844      StoredVal->getType()->isStructTy() ||
845      StoredVal->getType()->isArrayTy())
846    return false;
847
848  // The store has to be at least as big as the load.
849  if (TD.getTypeSizeInBits(StoredVal->getType()) <
850        TD.getTypeSizeInBits(LoadTy))
851    return false;
852
853  return true;
854}
855
856
857/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
858/// then a load from a must-aliased pointer of a different type, try to coerce
859/// the stored value.  LoadedTy is the type of the load we want to replace and
860/// InsertPt is the place to insert new instructions.
861///
862/// If we can't do it, return null.
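///
/// Illustrative example: forwarding an i32 store to an i16 load of the same
/// address bitcasts/truncates the stored value (shifting it down first on
/// big-endian targets); pointers are round-tripped through the integer type of
/// pointer width.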
863static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
864                                             const Type *LoadedTy,
865                                             Instruction *InsertPt,
866                                             const TargetData &TD) {
867  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
868    return 0;
869
870  const Type *StoredValTy = StoredVal->getType();
871
872  uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
873  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);
874
875  // If the store and reload are the same size, we can always reuse it.
876  if (StoreSize == LoadSize) {
877    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
878      // Pointer to Pointer -> use bitcast.
879      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
880    }
881
882    // Convert source pointers to integers, which can be bitcast.
883    if (StoredValTy->isPointerTy()) {
884      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
885      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
886    }
887
888    const Type *TypeToCastTo = LoadedTy;
889    if (TypeToCastTo->isPointerTy())
890      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());
891
892    if (StoredValTy != TypeToCastTo)
893      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
894
895    // Cast to pointer if the load needs a pointer type.
896    if (LoadedTy->isPointerTy())
897      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);
898
899    return StoredVal;
900  }
901
902  // If the loaded value is smaller than the available value, then we can
903  // extract out a piece from it.  If the available value is too small, then we
904  // can't do anything.
905  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");
906
907  // Convert source pointers to integers, which can be manipulated.
908  if (StoredValTy->isPointerTy()) {
909    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
910    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
911  }
912
913  // Convert vectors and fp to integer, which can be manipulated.
914  if (!StoredValTy->isIntegerTy()) {
915    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
916    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
917  }
918
919  // If this is a big-endian system, we need to shift the value down to the low
920  // bits so that a truncate will work.
921  if (TD.isBigEndian()) {
922    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
923    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
924  }
925
926  // Truncate the integer to the right size now.
927  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
928  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);
929
930  if (LoadedTy == NewIntTy)
931    return StoredVal;
932
933  // If the result is a pointer, inttoptr.
934  if (LoadedTy->isPointerTy())
935    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);
936
937  // Otherwise, bitcast.
938  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
939}
940
941/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
942/// be expressed as a base pointer plus a constant offset.  Return the base and
943/// offset to the caller.
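///
/// Illustrative example (hypothetical IR): for
///   %p2 = getelementptr i32* %p, i64 3
/// the result is base %p with Offset advanced by 12 bytes (4 bytes per i32
/// element); bitcasts are looked through without changing Offset.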
944static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
945                                        const TargetData &TD) {
946  Operator *PtrOp = dyn_cast<Operator>(Ptr);
947  if (PtrOp == 0) return Ptr;
948
949  // Just look through bitcasts.
950  if (PtrOp->getOpcode() == Instruction::BitCast)
951    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);
952
953  // If this is a GEP with constant indices, we can look through it.
954  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
955  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;
956
957  gep_type_iterator GTI = gep_type_begin(GEP);
958  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
959       ++I, ++GTI) {
960    ConstantInt *OpC = cast<ConstantInt>(*I);
961    if (OpC->isZero()) continue;
962
963    // Handle struct and array indices, which add their offset to the pointer.
964    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
965      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
966    } else {
967      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
968      Offset += OpC->getSExtValue()*Size;
969    }
970  }
971
972  // Re-sign extend from the pointer size if needed to get overflow edge cases
973  // right.
974  unsigned PtrSize = TD.getPointerSizeInBits();
975  if (PtrSize < 64)
976    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);
977
978  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
979}
980
981
982/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
983/// memdep query of a load that ends up being a clobbering memory write (store,
984/// memset, memcpy, memmove).  This means that the write *may* provide bits used
985/// by the load but we can't be sure because the pointers don't mustalias.
986///
987/// Check this case to see if there is anything more we can do before we give
988/// up.  This returns -1 if we have to give up, or a byte number in the stored
989/// value of the piece that feeds the load.
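///
/// Illustrative example: a 4-byte store at (base + 4) that clobbers a 2-byte
/// load at (base + 6) returns 2, the byte offset of the loaded bytes within
/// the stored value; a store that does not fully cover the load returns -1.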
990static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
991                                          Value *WritePtr,
992                                          uint64_t WriteSizeInBits,
993                                          const TargetData &TD) {
994  // If the loaded or stored value is a first-class array or struct, don't try
995  // to transform them.  We need to be able to bitcast to integer.
996  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
997    return -1;
998
999  int64_t StoreOffset = 0, LoadOffset = 0;
1000  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
1001  Value *LoadBase =
1002    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
1003  if (StoreBase != LoadBase)
1004    return -1;
1005
1006  // If the load and store are to the exact same address, they should have been
1007  // a must alias.  AA must have gotten confused.
1008  // FIXME: Study to see if/when this happens.  One case is forwarding a memset
1009  // to a load from the base of the memset.
1010#if 0
1011  if (LoadOffset == StoreOffset) {
1012    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
1013    << "Base       = " << *StoreBase << "\n"
1014    << "Store Ptr  = " << *WritePtr << "\n"
1015    << "Store Offs = " << StoreOffset << "\n"
1016    << "Load Ptr   = " << *LoadPtr << "\n";
1017    abort();
1018  }
1019#endif
1020
1021  // If the load and store don't overlap at all, the store doesn't provide
1022  // anything to the load.  In this case, they really don't alias at all, AA
1023  // must have gotten confused.
1024  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
1025  // remove this check, as it is duplicated with what we have below.
1026  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);
1027
1028  if ((WriteSizeInBits & 7) | (LoadSize & 7))
1029    return -1;
1030  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
1031  LoadSize >>= 3;
1032
1033
1034  bool isAAFailure = false;
1035  if (StoreOffset < LoadOffset)
1036    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
1037  else
1038    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
1039
1040  if (isAAFailure) {
1041#if 0
1042    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
1043    << "Base       = " << *StoreBase << "\n"
1044    << "Store Ptr  = " << *WritePtr << "\n"
1045    << "Store Offs = " << StoreOffset << "\n"
1046    << "Load Ptr   = " << *LoadPtr << "\n";
1047    abort();
1048#endif
1049    return -1;
1050  }
1051
1052  // If the Load isn't completely contained within the stored bits, we don't
1053  // have all the bits to feed it.  We could do something crazy in the future
1054  // (issue a smaller load then merge the bits in) but this seems unlikely to be
1055  // valuable.
1056  if (StoreOffset > LoadOffset ||
1057      StoreOffset+StoreSize < LoadOffset+LoadSize)
1058    return -1;
1059
1060  // Okay, we can do this transformation.  Return the number of bytes into the
1061  // store that the load is.
1062  return LoadOffset-StoreOffset;
1063}
1064
1065/// AnalyzeLoadFromClobberingStore - This function is called when we have a
1066/// memdep query of a load that ends up being a clobbering store.
1067static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
1068                                          StoreInst *DepSI,
1069                                          const TargetData &TD) {
1070  // Cannot handle reading from store of first-class aggregate yet.
1071  if (DepSI->getOperand(0)->getType()->isStructTy() ||
1072      DepSI->getOperand(0)->getType()->isArrayTy())
1073    return -1;
1074
1075  Value *StorePtr = DepSI->getPointerOperand();
1076  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
1077  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
1078                                        StorePtr, StoreSize, TD);
1079}
1080
1081static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
1082                                            MemIntrinsic *MI,
1083                                            const TargetData &TD) {
1084  // If the mem operation is a non-constant size, we can't handle it.
1085  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
1086  if (SizeCst == 0) return -1;
1087  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;
1088
1089  // If this is memset, we just need to see if the offset is valid in the size
1090  // of the memset.
1091  if (MI->getIntrinsicID() == Intrinsic::memset)
1092    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
1093                                          MemSizeInBits, TD);
1094
1095  // If we have a memcpy/memmove, the only case we can handle is if this is a
1096  // copy from constant memory.  In that case, we can read directly from the
1097  // constant memory.
1098  MemTransferInst *MTI = cast<MemTransferInst>(MI);
1099
1100  Constant *Src = dyn_cast<Constant>(MTI->getSource());
1101  if (Src == 0) return -1;
1102
1103  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
1104  if (GV == 0 || !GV->isConstant()) return -1;
1105
1106  // See if the access is within the bounds of the transfer.
1107  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
1108                                              MI->getDest(), MemSizeInBits, TD);
1109  if (Offset == -1)
1110    return Offset;
1111
1112  // Otherwise, see if we can constant fold a load from the constant with the
1113  // offset applied as appropriate.
1114  Src = ConstantExpr::getBitCast(Src,
1115                                 llvm::Type::getInt8PtrTy(Src->getContext()));
1116  Constant *OffsetCst =
1117    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
1118  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
1119  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
1120  if (ConstantFoldLoadFromConstPtr(Src, &TD))
1121    return Offset;
1122  return -1;
1123}
1124
1125
1126/// GetStoreValueForLoad - This function is called when we have a
1127/// memdep query of a load that ends up being a clobbering store.  This means
1128/// that the store *may* provide bits used by the load but we can't be sure
1129/// because the pointers don't mustalias.  Check this case to see if there is
1130/// anything more we can do before we give up.
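///
/// Illustrative example: on a little-endian target, an i8 load at Offset 2 out
/// of a stored i32 is materialized as an lshr by 16 bits followed by a trunc
/// to i8.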
1131static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
1132                                   const Type *LoadTy,
1133                                   Instruction *InsertPt, const TargetData &TD){
1134  LLVMContext &Ctx = SrcVal->getType()->getContext();
1135
1136  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
1137  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;
1138
1139  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
1140
1141  // Compute which bits of the stored value are being used by the load.  Convert
1142  // to an integer type to start with.
1143  if (SrcVal->getType()->isPointerTy())
1144    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
1145  if (!SrcVal->getType()->isIntegerTy())
1146    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
1147                                   "tmp");
1148
1149  // Shift the bits to the least significant depending on endianness.
1150  unsigned ShiftAmt;
1151  if (TD.isLittleEndian())
1152    ShiftAmt = Offset*8;
1153  else
1154    ShiftAmt = (StoreSize-LoadSize-Offset)*8;
1155
1156  if (ShiftAmt)
1157    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");
1158
1159  if (LoadSize != StoreSize)
1160    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
1161                                 "tmp");
1162
1163  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
1164}
1165
1166/// GetMemInstValueForLoad - This function is called when we have a
1167/// memdep query of a load that ends up being a clobbering mem intrinsic.
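///
/// Illustrative example: forwarding an i32 load from memset(P, %c, N) zero
/// extends the byte %c to i32 and or's in shifted copies of it until all four
/// bytes of the result equal %c; a memcpy from a constant global is handled by
/// constant folding a load at the right offset.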
1168static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
1169                                     const Type *LoadTy, Instruction *InsertPt,
1170                                     const TargetData &TD){
1171  LLVMContext &Ctx = LoadTy->getContext();
1172  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
1173
1174  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
1175
1176  // We know that this method is only called when the mem transfer fully
1177  // provides the bits for the load.
1178  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
1179    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
1180    // independently of what the offset is.
1181    Value *Val = MSI->getValue();
1182    if (LoadSize != 1)
1183      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));
1184
1185    Value *OneElt = Val;
1186
1187    // Splat the value out to the right number of bits.
1188    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
1189      // If we can double the number of bytes set, do it.
1190      if (NumBytesSet*2 <= LoadSize) {
1191        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
1192        Val = Builder.CreateOr(Val, ShVal);
1193        NumBytesSet <<= 1;
1194        continue;
1195      }
1196
1197      // Otherwise insert one byte at a time.
1198      Value *ShVal = Builder.CreateShl(Val, 1*8);
1199      Val = Builder.CreateOr(OneElt, ShVal);
1200      ++NumBytesSet;
1201    }
1202
1203    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
1204  }
1205
1206  // Otherwise, this is a memcpy/memmove from a constant global.
1207  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
1208  Constant *Src = cast<Constant>(MTI->getSource());
1209
1210  // Otherwise, see if we can constant fold a load from the constant with the
1211  // offset applied as appropriate.
1212  Src = ConstantExpr::getBitCast(Src,
1213                                 llvm::Type::getInt8PtrTy(Src->getContext()));
1214  Constant *OffsetCst =
1215  ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
1216  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
1217  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
1218  return ConstantFoldLoadFromConstPtr(Src, &TD);
1219}
1220
1221namespace {
1222
1223struct AvailableValueInBlock {
1224  /// BB - The basic block in question.
1225  BasicBlock *BB;
1226  enum ValType {
1227    SimpleVal,  // A simple value that is accessed, possibly at an offset.
1228    MemIntrin   // A memory intrinsic which is loaded from.
1229  };
1230
1231  /// V - The value that is live out of the block.
1232  PointerIntPair<Value *, 1, ValType> Val;
1233
1234  /// Offset - The byte offset in Val that is interesting for the load query.
1235  unsigned Offset;
1236
1237  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
1238                                   unsigned Offset = 0) {
1239    AvailableValueInBlock Res;
1240    Res.BB = BB;
1241    Res.Val.setPointer(V);
1242    Res.Val.setInt(SimpleVal);
1243    Res.Offset = Offset;
1244    return Res;
1245  }
1246
1247  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
1248                                     unsigned Offset = 0) {
1249    AvailableValueInBlock Res;
1250    Res.BB = BB;
1251    Res.Val.setPointer(MI);
1252    Res.Val.setInt(MemIntrin);
1253    Res.Offset = Offset;
1254    return Res;
1255  }
1256
1257  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
1258  Value *getSimpleValue() const {
1259    assert(isSimpleValue() && "Wrong accessor");
1260    return Val.getPointer();
1261  }
1262
1263  MemIntrinsic *getMemIntrinValue() const {
1264    assert(!isSimpleValue() && "Wrong accessor");
1265    return cast<MemIntrinsic>(Val.getPointer());
1266  }
1267
1268  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
1269  /// defined here to the specified type.  This handles various coercion cases.
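  /// (Illustrative: a simple value of the wrong type is adjusted with
  /// GetStoreValueForLoad, while a mem intrinsic is materialized with
  /// GetMemInstValueForLoad; both are emitted before this block's terminator.)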
1270  Value *MaterializeAdjustedValue(const Type *LoadTy,
1271                                  const TargetData *TD) const {
1272    Value *Res;
1273    if (isSimpleValue()) {
1274      Res = getSimpleValue();
1275      if (Res->getType() != LoadTy) {
1276        assert(TD && "Need target data to handle type mismatch case");
1277        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
1278                                   *TD);
1279
1280        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
1281                     << *getSimpleValue() << '\n'
1282                     << *Res << '\n' << "\n\n\n");
1283      }
1284    } else {
1285      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
1286                                   LoadTy, BB->getTerminator(), *TD);
1287      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
1288                   << "  " << *getMemIntrinValue() << '\n'
1289                   << *Res << '\n' << "\n\n\n");
1290    }
1291    return Res;
1292  }
1293};
1294
1295}
1296
1297/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
1298/// construct SSA form, allowing us to eliminate LI.  This returns the value
1299/// that should be used at LI's definition site.
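///
/// Illustrative example: if the load's block is reached from two predecessors
/// and each has an available value, SSAUpdater inserts the necessary PHI and
/// that PHI (or the single dominating value, if there is one) is returned.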
1300static Value *ConstructSSAForLoadSet(LoadInst *LI,
1301                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
1302                                     const TargetData *TD,
1303                                     const DominatorTree &DT,
1304                                     AliasAnalysis *AA) {
1305  // Check for the fully redundant, dominating load case.  In this case, we can
1306  // just use the dominating value directly.
1307  if (ValuesPerBlock.size() == 1 &&
1308      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
1309    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);
1310
1311  // Otherwise, we have to construct SSA form.
1312  SmallVector<PHINode*, 8> NewPHIs;
1313  SSAUpdater SSAUpdate(&NewPHIs);
1314  SSAUpdate.Initialize(LI);
1315
1316  const Type *LoadTy = LI->getType();
1317
1318  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
1319    const AvailableValueInBlock &AV = ValuesPerBlock[i];
1320    BasicBlock *BB = AV.BB;
1321
1322    if (SSAUpdate.HasValueForBlock(BB))
1323      continue;
1324
1325    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
1326  }
1327
1328  // Perform PHI construction.
1329  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
1330
1331  // If new PHI nodes were created, notify alias analysis.
1332  if (V->getType()->isPointerTy())
1333    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
1334      AA->copyValue(LI, NewPHIs[i]);
1335
1336  return V;
1337}
1338
1339static bool isLifetimeStart(const Instruction *Inst) {
1340  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
1341    return II->getIntrinsicID() == Intrinsic::lifetime_start;
1342  return false;
1343}
1344
1345/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
1346/// non-local by performing PHI construction.
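///
/// Illustrative example: a load whose block's two predecessors each store to
/// the same (phi-translated) address is replaced by a PHI of the two stored
/// values; when exactly one predecessor lacks a value, load PRE may insert a
/// load there instead.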
1347bool GVN::processNonLocalLoad(LoadInst *LI,
1348                              SmallVectorImpl<Instruction*> &toErase) {
1349  // Find the non-local dependencies of the load.
1350  SmallVector<NonLocalDepResult, 64> Deps;
1351  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
1352                                   Deps);
1353  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
1354  //             << Deps.size() << *LI << '\n');
1355
1356  // If we had to process more than one hundred blocks to find the
1357  // dependencies, this load isn't worth worrying about.  Optimizing
1358  // it will be too expensive.
1359  if (Deps.size() > 100)
1360    return false;
1361
1362  // If we had a phi translation failure, we'll have a single entry which is a
1363  // clobber in the current block.  Reject this early.
1364  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
1365    DEBUG(
1366      dbgs() << "GVN: non-local load ";
1367      WriteAsOperand(dbgs(), LI);
1368      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
1369    );
1370    return false;
1371  }
1372
1373  // Filter out useless results (non-locals, etc).  Keep track of the blocks
1374  // where we have a value available in ValuesPerBlock, and also whether we see
1375  // dependencies that produce an unknown value for the load (such as a call
1376  // that could potentially clobber the load).
1377  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
1378  SmallVector<BasicBlock*, 16> UnavailableBlocks;
1379
1380  const TargetData *TD = 0;
1381
1382  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
1383    BasicBlock *DepBB = Deps[i].getBB();
1384    MemDepResult DepInfo = Deps[i].getResult();
1385
1386    if (DepInfo.isClobber()) {
1387      // The address being loaded in this non-local block may not be the same as
1388      // the pointer operand of the load if PHI translation occurs.  Make sure
1389      // to consider the right address.
1390      Value *Address = Deps[i].getAddress();
1391
1392      // If the dependence is to a store that writes to a superset of the bits
1393      // read by the load, we can extract the bits we need for the load from the
1394      // stored value.
1395      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
1396        if (TD == 0)
1397          TD = getAnalysisIfAvailable<TargetData>();
1398        if (TD && Address) {
1399          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
1400                                                      DepSI, *TD);
1401          if (Offset != -1) {
1402            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1403                                                           DepSI->getOperand(0),
1404                                                                Offset));
1405            continue;
1406          }
1407        }
1408      }
1409
1410      // If the clobbering value is a memset/memcpy/memmove, see if we can
1411      // forward a value on from it.
1412      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
1413        if (TD == 0)
1414          TD = getAnalysisIfAvailable<TargetData>();
1415        if (TD && Address) {
1416          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
1417                                                        DepMI, *TD);
1418          if (Offset != -1) {
1419            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
1420                                                                  Offset));
1421            continue;
1422          }
1423        }
1424      }
1425
1426      UnavailableBlocks.push_back(DepBB);
1427      continue;
1428    }
1429
1430    Instruction *DepInst = DepInfo.getInst();
1431
1432    // Loading the allocation -> undef.
1433    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
1434        // Loading immediately after lifetime begin -> undef.
1435        isLifetimeStart(DepInst)) {
1436      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1437                                             UndefValue::get(LI->getType())));
1438      continue;
1439    }
1440
1441    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
1442      // Reject loads and stores that are to the same address but are of
1443      // different types if we have to.
1444      if (S->getOperand(0)->getType() != LI->getType()) {
1445        if (TD == 0)
1446          TD = getAnalysisIfAvailable<TargetData>();
1447
1448        // If the stored value is larger than or equal to the loaded value, we can
1449        // reuse it.
1450        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
1451                                                        LI->getType(), *TD)) {
1452          UnavailableBlocks.push_back(DepBB);
1453          continue;
1454        }
1455      }
1456
1457      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1458                                                          S->getOperand(0)));
1459      continue;
1460    }
1461
1462    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
1463      // If the types mismatch and we can't handle it, reject reuse of the load.
1464      if (LD->getType() != LI->getType()) {
1465        if (TD == 0)
1466          TD = getAnalysisIfAvailable<TargetData>();
1467
1468        // If the stored value is larger than or equal to the loaded value, we can
1469        // reuse it.
1470        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
1471          UnavailableBlocks.push_back(DepBB);
1472          continue;
1473        }
1474      }
1475      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
1476      continue;
1477    }
1478
1479    UnavailableBlocks.push_back(DepBB);
1480    continue;
1481  }
1482
1483  // If we have no predecessors that produce a known value for this load, exit
1484  // early.
1485  if (ValuesPerBlock.empty()) return false;
1486
1487  // If all of the instructions we depend on produce a known value for this
1488  // load, then it is fully redundant and we can use PHI insertion to compute
1489  // its value.  Insert PHIs and remove the fully redundant value now.
1490  if (UnavailableBlocks.empty()) {
1491    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');
1492
1493    // Perform PHI construction.
1494    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
1495                                      VN.getAliasAnalysis());
1496    LI->replaceAllUsesWith(V);
1497
1498    if (isa<PHINode>(V))
1499      V->takeName(LI);
1500    if (V->getType()->isPointerTy())
1501      MD->invalidateCachedPointerInfo(V);
1502    VN.erase(LI);
1503    toErase.push_back(LI);
1504    ++NumGVNLoad;
1505    return true;
1506  }
1507
1508  if (!EnablePRE || !EnableLoadPRE)
1509    return false;
1510
1511  // Okay, we have *some* definitions of the value.  This means that the value
1512  // is available in some of our (transitive) predecessors.  Let's think about
1513  // doing PRE of this load.  This will involve inserting a new load into the
1514  // predecessor when it's not available.  We could do this in general, but
1515  // prefer not to increase code size.  As such, we only do this when we know
1516  // that we have to insert just *one* load (which means we're basically moving
1517  // the load, not inserting a new one).
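  // Sketch of the case we are after (illustrative; names are made up):
  //   Pred1:  store i32 %x, i32* %P     ; value of the load is available here
  //   Pred2:  (no access to %P)         ; value is *not* available here
  //   LoadBB: %v = load i32* %P         ; both Pred1 and Pred2 branch here
  // Inserting one reload of %P at the end of Pred2 and a PHI in LoadBB makes
  // the original load fully redundant.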
1518
1519  SmallPtrSet<BasicBlock *, 4> Blockers;
1520  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
1521    Blockers.insert(UnavailableBlocks[i]);
1522
1523  // Let's find the first basic block with more than one predecessor.  Walk
1524  // backwards through single-predecessor blocks if needed.
1525  BasicBlock *LoadBB = LI->getParent();
1526  BasicBlock *TmpBB = LoadBB;
1527
1528  bool isSinglePred = false;
1529  bool allSingleSucc = true;
1530  while (TmpBB->getSinglePredecessor()) {
1531    isSinglePred = true;
1532    TmpBB = TmpBB->getSinglePredecessor();
1533    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
1534      return false;
1535    if (Blockers.count(TmpBB))
1536      return false;
1537    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
1538      allSingleSucc = false;
1539  }
1540
1541  assert(TmpBB);
1542  LoadBB = TmpBB;
1543
1544  // If the set of available values contains LI itself, we have a loop in which
1545  // at least one of the incoming values is LI.  Since we would not be able to
1546  // eliminate LI even after inserting loads in the other predecessors, PRE here
1547  // would only increase code size.  Reject this case by scanning for LI.
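  // (Illustrative: in a single-block loop such as "Loop: %v = load i32* %P;
  //  ...; br label %Loop", the value available along the back edge is %v
  //  itself, so PRE could not remove the load.)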
1548  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
1549    if (ValuesPerBlock[i].isSimpleValue() &&
1550        ValuesPerBlock[i].getSimpleValue() == LI) {
1551      // Skip cases where LI is the only definition, even for EnableFullLoadPRE.
1552      if (!EnableFullLoadPRE || e == 1)
1553        return false;
1554    }
1555  }
1556
1557  // FIXME: It is extremely unclear what this loop is doing, other than
1558  // artificially restricting loadpre.
1559  if (isSinglePred) {
1560    bool isHot = false;
1561    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
1562      const AvailableValueInBlock &AV = ValuesPerBlock[i];
1563      if (AV.isSimpleValue())
1564        // LI is "hot": it dominates one of the instructions it depends on,
1565        // which can only happen when the dependence is carried around a loop.
1566        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
1567          if (DT->dominates(LI, I)) {
1568            isHot = true;
1569            break;
1570          }
1571    }
1572
1573    // We are only interested in "hot" instructions.  We don't want to
1574    // pessimize the other cases here.
1575    if (!isHot)
1576      return false;
1577  }
1578
1579  // Check to see how many predecessors have the loaded value fully
1580  // available.
1581  DenseMap<BasicBlock*, Value*> PredLoads;
1582  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
1583  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
1584    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
1585  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
1586    FullyAvailableBlocks[UnavailableBlocks[i]] = false;
1587
1588  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
1589  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
1590       PI != E; ++PI) {
1591    BasicBlock *Pred = *PI;
1592    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
1593      continue;
1594    }
1595    PredLoads[Pred] = 0;
1596
1597    if (Pred->getTerminator()->getNumSuccessors() != 1) {
1598      if (isa<IndirectBrInst>(Pred->getTerminator())) {
1599        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1600              << Pred->getName() << "': " << *LI << '\n');
1601        return false;
1602      }
1603      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
1604      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
1605    }
1606  }
1607  if (!NeedToSplit.empty()) {
1608    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
1609    return false;
1610  }
1611
1612  // Decide whether PRE is profitable for this load.
1613  unsigned NumUnavailablePreds = PredLoads.size();
1614  assert(NumUnavailablePreds != 0 &&
1615         "Fully available value should be eliminated above!");
1616  if (!EnableFullLoadPRE) {
1617    // If this load is unavailable in multiple predecessors, reject it.
1618    // FIXME: If we could restructure the CFG, we could make a common pred with
1619    // all the preds that don't have an available LI and insert a new load into
1620    // that one block.
1621    if (NumUnavailablePreds != 1)
1622      return false;
1623  }
1624
1625  // Check if the load can safely be moved to all the unavailable predecessors.
1626  bool CanDoPRE = true;
1627  SmallVector<Instruction*, 8> NewInsts;
1628  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
1629         E = PredLoads.end(); I != E; ++I) {
1630    BasicBlock *UnavailablePred = I->first;
1631
1632    // Do PHI translation to get its value in the predecessor if necessary.  The
1633    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
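    // (Illustrative: if the address in LoadBB is
    //  "%a = phi i8* [ %p, %Pred1 ], [ %q, %Pred2 ]", translating it into
    //  %Pred1 yields %p.)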
1634
1635    // If all preds have a single successor, then we know it is safe to insert
1636    // the load on the pred (?!?), so we can insert code to materialize the
1637    // pointer if it is not available.
1638    PHITransAddr Address(LI->getOperand(0), TD);
1639    Value *LoadPtr = 0;
1640    if (allSingleSucc) {
1641      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
1642                                                  *DT, NewInsts);
1643    } else {
1644      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
1645      LoadPtr = Address.getAddr();
1646    }
1647
1648    // If we couldn't find or insert a computation of this phi translated value,
1649    // we fail PRE.
1650    if (LoadPtr == 0) {
1651      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1652            << *LI->getOperand(0) << "\n");
1653      CanDoPRE = false;
1654      break;
1655    }
1656
1657    // Make sure it is valid to move this load here.  We have to watch out for:
1658    //  %1 = getelementptr i8* %p, ...
1659    //  test %p and branch if %p == 0
1660    //  load i8* %1
1661    // It is valid to have the getelementptr before the test, even if %p can be
1662    // 0, as getelementptr only does address arithmetic.
1663    // If we are not pushing the value through any multiple-successor blocks
1664    // we do not have this case.  Otherwise, check that the load is safe to
1665    // put anywhere; this can be improved, but should be conservatively safe.
1666    if (!allSingleSucc &&
1667        // FIXME: REEVALUATE THIS.
1668        !isSafeToLoadUnconditionally(LoadPtr,
1669                                     UnavailablePred->getTerminator(),
1670                                     LI->getAlignment(), TD)) {
1671      CanDoPRE = false;
1672      break;
1673    }
1674
1675    I->second = LoadPtr;
1676  }
1677
1678  if (!CanDoPRE) {
1679    while (!NewInsts.empty())
1680      NewInsts.pop_back_val()->eraseFromParent();
1681    return false;
1682  }
1683
1684  // Okay, we can eliminate this load by inserting a reload in the predecessor
1685  // and using PHI construction to get the value in the other predecessors; do
1686  // it now.
1687  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
1688  DEBUG(if (!NewInsts.empty())
1689          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
1690                 << *NewInsts.back() << '\n');
1691
1692  // Assign value numbers to the new instructions.
1693  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
1694    // FIXME: We really _ought_ to insert these value numbers into their
1695    // parent's availability map.  However, in doing so, we risk getting into
1696    // ordering issues.  If a block hasn't been processed yet, we would be
1697    // marking a value as AVAIL-IN, which isn't what we intend.
1698    VN.lookup_or_add(NewInsts[i]);
1699  }
1700
1701  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
1702         E = PredLoads.end(); I != E; ++I) {
1703    BasicBlock *UnavailablePred = I->first;
1704    Value *LoadPtr = I->second;
1705
1706    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
1707                                  LI->getAlignment(),
1708                                  UnavailablePred->getTerminator());
1709
1710    // Add the newly created load.
1711    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
1712                                                        NewLoad));
1713    MD->invalidateCachedPointerInfo(LoadPtr);
1714    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
1715  }
1716
1717  // Perform PHI construction.
1718  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
1719                                    VN.getAliasAnalysis());
1720  LI->replaceAllUsesWith(V);
1721  if (isa<PHINode>(V))
1722    V->takeName(LI);
1723  if (V->getType()->isPointerTy())
1724    MD->invalidateCachedPointerInfo(V);
1725  VN.erase(LI);
1726  toErase.push_back(LI);
1727  ++NumPRELoad;
1728  return true;
1729}
1730
1731/// processLoad - Attempt to eliminate a load, first by eliminating it
1732/// locally, and then attempting non-local elimination if that fails.
1733bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
1734  if (!MD)
1735    return false;
1736
1737  if (L->isVolatile())
1738    return false;
1739
1740  // Find the local dependency of this load (e.g. a prior store to this pointer).
1741  MemDepResult Dep = MD->getDependency(L);
1742
1743  // If the value isn't available, don't do anything!
1744  if (Dep.isClobber()) {
1745    // Check to see if we have something like this:
1746    //   store i32 123, i32* %P
1747    //   %A = bitcast i32* %P to i8*
1748    //   %B = gep i8* %A, i32 1
1749    //   %C = load i8* %B
1750    //
1751    // We could do this by recognizing that the clobbering instruction and the
1752    // load share an obvious common base plus a constant offset, and that the
1753    // previous store (or memset) completely covers this load.  This sort of
1754    // thing can happen in bitfield access code.
1755    Value *AvailVal = 0;
1756    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
1757      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
1758        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
1759                                                    L->getPointerOperand(),
1760                                                    DepSI, *TD);
1761        if (Offset != -1)
1762          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
1763                                          L->getType(), L, *TD);
1764      }
1765
1766    // If the clobbering value is a memset/memcpy/memmove, see if we can forward
1767    // a value on from it.
1768    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
1769      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
1770        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
1771                                                      L->getPointerOperand(),
1772                                                      DepMI, *TD);
1773        if (Offset != -1)
1774          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,*TD);
1775      }
1776    }
1777
1778    if (AvailVal) {
1779      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
1780            << *AvailVal << '\n' << *L << "\n\n\n");
1781
1782      // Replace the load!
1783      L->replaceAllUsesWith(AvailVal);
1784      if (AvailVal->getType()->isPointerTy())
1785        MD->invalidateCachedPointerInfo(AvailVal);
1786      VN.erase(L);
1787      toErase.push_back(L);
1788      ++NumGVNLoad;
1789      return true;
1790    }
1791
1792    DEBUG(
1793      // fast print dep, using operator<< on instruction would be too slow
1794      dbgs() << "GVN: load ";
1795      WriteAsOperand(dbgs(), L);
1796      Instruction *I = Dep.getInst();
1797      dbgs() << " is clobbered by " << *I << '\n';
1798    );
1799    return false;
1800  }
1801
1802  // If it is defined in another block, try harder.
1803  if (Dep.isNonLocal())
1804    return processNonLocalLoad(L, toErase);
1805
1806  Instruction *DepInst = Dep.getInst();
1807  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
1808    Value *StoredVal = DepSI->getOperand(0);
1809
1810    // The store and load are to a must-aliased pointer, but they may not
1811    // actually have the same type.  See if we know how to reuse the stored
1812    // value (depending on its type).
1813    const TargetData *TD = 0;
1814    if (StoredVal->getType() != L->getType()) {
1815      if ((TD = getAnalysisIfAvailable<TargetData>())) {
1816        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
1817                                                   L, *TD);
1818        if (StoredVal == 0)
1819          return false;
1820
1821        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
1822                     << '\n' << *L << "\n\n\n");
1823      }
1824      else
1825        return false;
1826    }
1827
1828    // Remove it!
1829    L->replaceAllUsesWith(StoredVal);
1830    if (StoredVal->getType()->isPointerTy())
1831      MD->invalidateCachedPointerInfo(StoredVal);
1832    VN.erase(L);
1833    toErase.push_back(L);
1834    ++NumGVNLoad;
1835    return true;
1836  }
1837
1838  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
1839    Value *AvailableVal = DepLI;
1840
1841    // The loads are of a must-aliased pointer, but they may not actually have
1842    // the same type.  See if we know how to reuse the previously loaded value
1843    // (depending on its type).
1844    const TargetData *TD = 0;
1845    if (DepLI->getType() != L->getType()) {
1846      if ((TD = getAnalysisIfAvailable<TargetData>())) {
1847        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), L,*TD);
1848        if (AvailableVal == 0)
1849          return false;
1850
1851        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
1852                     << "\n" << *L << "\n\n\n");
1853      }
1854      else
1855        return false;
1856    }
1857
1858    // Remove it!
1859    L->replaceAllUsesWith(AvailableVal);
1860    if (DepLI->getType()->isPointerTy())
1861      MD->invalidateCachedPointerInfo(DepLI);
1862    VN.erase(L);
1863    toErase.push_back(L);
1864    ++NumGVNLoad;
1865    return true;
1866  }
1867
1868  // If this load really doesn't depend on anything, then we must be loading an
1869  // undef value.  This can happen when loading from a fresh allocation with no
1870  // intervening stores, for example.
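  // For instance (illustrative):
  //   %p = alloca i32
  //   %v = load i32* %p   ; nothing has been stored to %p, so %v is undef.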
1871  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
1872    L->replaceAllUsesWith(UndefValue::get(L->getType()));
1873    VN.erase(L);
1874    toErase.push_back(L);
1875    ++NumGVNLoad;
1876    return true;
1877  }
1878
1879  // If this load occurs right after a lifetime begin intrinsic, then the
1880  // loaded value is undefined.
1881  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
1882    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
1883      L->replaceAllUsesWith(UndefValue::get(L->getType()));
1884      VN.erase(L);
1885      toErase.push_back(L);
1886      ++NumGVNLoad;
1887      return true;
1888    }
1889  }
1890
1891  return false;
1892}
1893
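/// lookupNumber - Walk the value-numbering scope of BB and the scopes of its
/// dominators (linked through the 'parent' pointers set up in
/// iterateOnFunction) looking for a value with number 'num'.  Returns null if
/// no such value is available.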
1894Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
1895  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
1896  if (I == localAvail.end())
1897    return 0;
1898
1899  ValueNumberScope *Locals = I->second;
1900  while (Locals) {
1901    DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
1902    if (I != Locals->table.end())
1903      return I->second;
1904    Locals = Locals->parent;
1905  }
1906
1907  return 0;
1908}
1909
1910
1911/// processInstruction - When calculating availability, handle an instruction
1912/// by inserting it into the appropriate sets.
1913bool GVN::processInstruction(Instruction *I,
1914                             SmallVectorImpl<Instruction*> &toErase) {
1915  // Ignore dbg info intrinsics.
1916  if (isa<DbgInfoIntrinsic>(I))
1917    return false;
1918
1919  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1920    bool Changed = processLoad(LI, toErase);
1921
1922    if (!Changed) {
1923      unsigned Num = VN.lookup_or_add(LI);
1924      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
1925    }
1926
1927    return Changed;
1928  }
1929
1930  uint32_t NextNum = VN.getNextUnusedValueNumber();
1931  unsigned Num = VN.lookup_or_add(I);
1932
1933  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
1934    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1935
1936    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
1937      return false;
1938
1939    Value *BranchCond = BI->getCondition();
1940    uint32_t CondVN = VN.lookup_or_add(BranchCond);
1941
1942    BasicBlock *TrueSucc = BI->getSuccessor(0);
1943    BasicBlock *FalseSucc = BI->getSuccessor(1);
1944
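    // Record the branch condition's known value in each successor that can only
    // be reached through this branch.  (Illustrative: after
    //   br i1 %c, label %T, label %F
    // a use of %c's value number inside %T can be treated as true, and inside
    // %F as false, provided those blocks have no other predecessors.)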
1945    if (TrueSucc->getSinglePredecessor())
1946      localAvail[TrueSucc]->table[CondVN] =
1947        ConstantInt::getTrue(TrueSucc->getContext());
1948    if (FalseSucc->getSinglePredecessor())
1949      localAvail[FalseSucc]->table[CondVN] =
1950        ConstantInt::getFalse(FalseSucc->getContext());
1951
1952    return false;
1953
1954  // Allocations and terminators are always uniquely numbered, so we can save
1955  // time and memory by fast-failing them.
1956  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
1957    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1958    return false;
1959  }
1960
1961  // Collapse PHI nodes
1962  if (PHINode* p = dyn_cast<PHINode>(I)) {
1963    Value *constVal = CollapsePhi(p);
1964
1965    if (constVal) {
1966      p->replaceAllUsesWith(constVal);
1967      if (MD && constVal->getType()->isPointerTy())
1968        MD->invalidateCachedPointerInfo(constVal);
1969      VN.erase(p);
1970
1971      toErase.push_back(p);
1972    } else {
1973      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1974    }
1975
1976  // If the number we were assigned was a brand new VN, then we don't
1977  // need to do a lookup to see if the number already exists
1978  // somewhere in the domtree: it can't!
1979  } else if (Num == NextNum) {
1980    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1981
1982  // Perform fast-path value-number based elimination of values inherited from
1983  // dominators.
1984  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
1985    // Remove it!
1986    VN.erase(I);
1987    I->replaceAllUsesWith(repl);
1988    if (MD && repl->getType()->isPointerTy())
1989      MD->invalidateCachedPointerInfo(repl);
1990    toErase.push_back(I);
1991    return true;
1992
1993  } else {
1994    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
1995  }
1996
1997  return false;
1998}
1999
2000/// runOnFunction - This is the main transformation entry point for a function.
2001bool GVN::runOnFunction(Function& F) {
2002  if (!NoLoads)
2003    MD = &getAnalysis<MemoryDependenceAnalysis>();
2004  DT = &getAnalysis<DominatorTree>();
2005  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
2006  VN.setMemDep(MD);
2007  VN.setDomTree(DT);
2008
2009  bool Changed = false;
2010  bool ShouldContinue = true;
2011
2012  // Merge unconditional branches, allowing PRE to catch more
2013  // optimization opportunities.
2014  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
2015    BasicBlock *BB = FI;
2016    ++FI;
2017    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
2018    if (removedBlock) ++NumGVNBlocks;
2019
2020    Changed |= removedBlock;
2021  }
2022
2023  unsigned Iteration = 0;
2024
2025  while (ShouldContinue) {
2026    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
2027    ShouldContinue = iterateOnFunction(F);
2028    if (splitCriticalEdges())
2029      ShouldContinue = true;
2030    Changed |= ShouldContinue;
2031    ++Iteration;
2032  }
2033
2034  if (EnablePRE) {
2035    bool PREChanged = true;
2036    while (PREChanged) {
2037      PREChanged = performPRE(F);
2038      Changed |= PREChanged;
2039    }
2040  }
2041  // FIXME: Should perform GVN again after PRE does something.  PRE can move
2042  // computations into blocks where they become fully redundant.  Note that
2043  // we can't do this until PRE's critical edge splitting updates memdep.
2044  // Actually, when this happens, we should just fully integrate PRE into GVN.
2045
2046  cleanupGlobalSets();
2047
2048  return Changed;
2049}
2050
2051
2052bool GVN::processBlock(BasicBlock *BB) {
2053  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
2054  // incrementing BI before processing an instruction).
2055  SmallVector<Instruction*, 8> toErase;
2056  bool ChangedFunction = false;
2057
2058  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2059       BI != BE;) {
2060    ChangedFunction |= processInstruction(BI, toErase);
2061    if (toErase.empty()) {
2062      ++BI;
2063      continue;
2064    }
2065
2066    // If we need some instructions deleted, do it now.
2067    NumGVNInstr += toErase.size();
2068
2069    // Avoid iterator invalidation.
2070    bool AtStart = BI == BB->begin();
2071    if (!AtStart)
2072      --BI;
2073
2074    for (SmallVector<Instruction*, 8>::iterator I = toErase.begin(),
2075         E = toErase.end(); I != E; ++I) {
2076      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
2077      if (MD) MD->removeInstruction(*I);
2078      (*I)->eraseFromParent();
2079      DEBUG(verifyRemoved(*I));
2080    }
2081    toErase.clear();
2082
2083    if (AtStart)
2084      BI = BB->begin();
2085    else
2086      ++BI;
2087  }
2088
2089  return ChangedFunction;
2090}
2091
2092/// performPRE - Perform a purely local form of PRE that looks for diamond
2093/// control flow patterns and attempts to perform simple PRE at the join point.
2094bool GVN::performPRE(Function &F) {
2095  bool Changed = false;
2096  DenseMap<BasicBlock*, Value*> predMap;
2097  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
2098       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
2099    BasicBlock *CurrentBlock = *DI;
2100
2101    // Nothing to PRE in the entry block.
2102    if (CurrentBlock == &F.getEntryBlock()) continue;
2103
2104    for (BasicBlock::iterator BI = CurrentBlock->begin(),
2105         BE = CurrentBlock->end(); BI != BE; ) {
2106      Instruction *CurInst = BI++;
2107
2108      if (isa<AllocaInst>(CurInst) ||
2109          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
2110          CurInst->getType()->isVoidTy() ||
2111          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2112          isa<DbgInfoIntrinsic>(CurInst))
2113        continue;
2114
2115      uint32_t ValNo = VN.lookup(CurInst);
2116
2117      // Look for the predecessors for PRE opportunities.  We're
2118      // only trying to solve the basic diamond case, where
2119      // a value is computed in the successor and one predecessor,
2120      // but not the other.  We also explicitly disallow cases
2121      // where the successor is its own predecessor, because they're
2122      // more complicated to get right.
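      // Sketch of the diamond case (illustrative): an "add i32 %a, %b" is
      // computed both in CurrentBlock and in one of its two predecessors, but
      // not in the other.  PRE clones the add into the predecessor that lacks
      // it and replaces the one in CurrentBlock with a PHI of the two copies.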
2123      unsigned NumWith = 0;
2124      unsigned NumWithout = 0;
2125      BasicBlock *PREPred = 0;
2126      predMap.clear();
2127
2128      for (pred_iterator PI = pred_begin(CurrentBlock),
2129           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
2130        BasicBlock *P = *PI;
2131        // We're not interested in PRE where the block is its
2132        // own predecessor, or in blocks with predecessors
2133        // that are not reachable.
2134        if (P == CurrentBlock) {
2135          NumWithout = 2;
2136          break;
2137        } else if (!localAvail.count(P))  {
2138          NumWithout = 2;
2139          break;
2140        }
2141
2142        DenseMap<uint32_t, Value*>::iterator predV =
2143                                            localAvail[P]->table.find(ValNo);
2144        if (predV == localAvail[P]->table.end()) {
2145          PREPred = P;
2146          ++NumWithout;
2147        } else if (predV->second == CurInst) {
2148          NumWithout = 2;
2149        } else {
2150          predMap[P] = predV->second;
2151          ++NumWith;
2152        }
2153      }
2154
2155      // Don't do PRE when it might increase code size, i.e. when
2156      // we would need to insert instructions in more than one pred.
2157      if (NumWithout != 1 || NumWith == 0)
2158        continue;
2159
2160      // Don't do PRE across an indirect branch.
2161      if (isa<IndirectBrInst>(PREPred->getTerminator()))
2162        continue;
2163
2164      // We can't do PRE safely on a critical edge, so instead we schedule
2165      // the edge to be split and perform the PRE the next time we iterate
2166      // on the function.
2167      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
2168      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
2169        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
2170        continue;
2171      }
2172
2173      // Instantiate the expression in the predecessor that lacked it.
2174      // Because we are going top-down through the block, all value numbers
2175      // will be available in the predecessor by the time we need them.  Any
2176      // that weren't originally present will have been instantiated earlier
2177      // in this loop.
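      // (Illustrative: to hoist "%t = add i32 %a, %b" into PREPred, the cloned
      //  add's operands are remapped to whatever values carry %a's and %b's
      //  value numbers in PREPred; if an operand has no available value there,
      //  the attempt is abandoned below.)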
2178      Instruction *PREInstr = CurInst->clone();
2179      bool success = true;
2180      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
2181        Value *Op = PREInstr->getOperand(i);
2182        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2183          continue;
2184
2185        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
2186          PREInstr->setOperand(i, V);
2187        } else {
2188          success = false;
2189          break;
2190        }
2191      }
2192
2193      // Fail out if we encounter an operand that is not available in
2194      // the PRE predecessor.  This is typically because of loads which
2195      // are not value numbered precisely.
2196      if (!success) {
2197        delete PREInstr;
2198        DEBUG(verifyRemoved(PREInstr));
2199        continue;
2200      }
2201
2202      PREInstr->insertBefore(PREPred->getTerminator());
2203      PREInstr->setName(CurInst->getName() + ".pre");
2204      predMap[PREPred] = PREInstr;
2205      VN.add(PREInstr, ValNo);
2206      ++NumGVNPRE;
2207
2208      // Update the availability map to include the new instruction.
2209      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));
2210
2211      // Create a PHI to make the value available in this block.
2212      PHINode* Phi = PHINode::Create(CurInst->getType(),
2213                                     CurInst->getName() + ".pre-phi",
2214                                     CurrentBlock->begin());
2215      for (pred_iterator PI = pred_begin(CurrentBlock),
2216           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
2217        BasicBlock *P = *PI;
2218        Phi->addIncoming(predMap[P], P);
2219      }
2220
2221      VN.add(Phi, ValNo);
2222      localAvail[CurrentBlock]->table[ValNo] = Phi;
2223
2224      CurInst->replaceAllUsesWith(Phi);
2225      if (MD && Phi->getType()->isPointerTy())
2226        MD->invalidateCachedPointerInfo(Phi);
2227      VN.erase(CurInst);
2228
2229      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
2230      if (MD) MD->removeInstruction(CurInst);
2231      CurInst->eraseFromParent();
2232      DEBUG(verifyRemoved(CurInst));
2233      Changed = true;
2234    }
2235  }
2236
2237  if (splitCriticalEdges())
2238    Changed = true;
2239
2240  return Changed;
2241}
2242
2243/// splitCriticalEdges - Split critical edges found during the previous
2244/// iteration that may enable further optimization.
2245bool GVN::splitCriticalEdges() {
2246  if (toSplit.empty())
2247    return false;
2248  do {
2249    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
2250    SplitCriticalEdge(Edge.first, Edge.second, this);
2251  } while (!toSplit.empty());
2252  if (MD) MD->invalidateCachedPredecessors();
2253  return true;
2254}
2255
2256/// iterateOnFunction - Executes one iteration of GVN.
2257bool GVN::iterateOnFunction(Function &F) {
2258  cleanupGlobalSets();
2259
2260  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
2261       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
2262    if (DI->getIDom())
2263      localAvail[DI->getBlock()] =
2264                   new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
2265    else
2266      localAvail[DI->getBlock()] = new ValueNumberScope(0);
2267  }
2268
2269  // Top-down walk of the dominator tree
2270  bool Changed = false;
2271#if 0
2272  // Needed for value numbering with phi construction to work.
2273  ReversePostOrderTraversal<Function*> RPOT(&F);
2274  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
2275       RE = RPOT.end(); RI != RE; ++RI)
2276    Changed |= processBlock(*RI);
2277#else
2278  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
2279       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
2280    Changed |= processBlock(DI->getBlock());
2281#endif
2282
2283  return Changed;
2284}
2285
2286void GVN::cleanupGlobalSets() {
2287  VN.clear();
2288
2289  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
2290       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
2291    delete I->second;
2292  localAvail.clear();
2293}
2294
2295/// verifyRemoved - Verify that the specified instruction does not occur in our
2296/// internal data structures.
2297void GVN::verifyRemoved(const Instruction *Inst) const {
2298  VN.verifyRemoved(Inst);
2299
2300  // Walk through the value number scope to make sure the instruction isn't
2301  // ferreted away in it.
2302  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
2303         I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
2304    const ValueNumberScope *VNS = I->second;
2305
2306    while (VNS) {
2307      for (DenseMap<uint32_t, Value*>::const_iterator
2308             II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
2309        assert(II->second != Inst && "Inst still in value numbering scope!");
2310      }
2311
2312      VNS = VNS->parent;
2313    }
2314  }
2315}
2316