GVN.cpp revision 075fb5d68fcb55d26e44c48f07dfdbbfa21ccb2a
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

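    // An expression is identified by its opcode, result type, operand value
    // numbers, and, for calls, the callee.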
    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

  };

  /// ValueTable - This class holds the mapping between values and value
  /// numbers.  It is used as an efficient mechanism to determine the
  /// expression-wise equivalence of two values.
  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
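// Provide DenseMap traits for Expression so expressions can be used as keys
// in the expression-numbering map.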
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    // Mix the type in without discarding the opcode hashed above.
    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9)) +
           hash * 37;

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

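/// getOpcode - Map the predicate of an integer or floating-point comparison
/// onto the corresponding Expression opcode.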
Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

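/// create_expression - Build an Expression from an instruction's opcode,
/// result type, and the value numbers of its operands.  There is one overload
/// per instruction kind that GVN can number.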
Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  CallSite CS(C);
  for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

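/// lookup_or_add_call - Assign a value number to a call.  Calls that do not
/// access memory are numbered purely structurally; read-only calls can reuse
/// the number of an identical dominating call (found via memdep); anything
/// else always gets a fresh number.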
uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
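  /// ValueNumberScope - A per-block table mapping value numbers to the leader
  /// value available in that block.  Scopes are chained through 'parent' so
  /// that lookups can fall back to values available in enclosing (dominating)
  /// blocks.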
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
        : FunctionPass(ID), NoLoads(noloads), MD(0) {
      initializeGVNPass(*PassRegistry::getPassRegistry());
    }

  private:
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)

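/// dump - Debugging aid: print a value-number -> value table.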
void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
      errs() << I->first << "\n";
      I->second->dump();
  }
  errs() << "}\n";
}

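/// isSafeReplacement - Return true if it is safe to replace the PHI node 'p'
/// with 'inst'.  Replacing a PHI with another PHI is rejected when some user
/// of 'p' is a PHI in the same block as 'inst'.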
static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(*UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

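/// CollapsePhi - If 'PN' always computes the same value, return that value if
/// it is safe to substitute for the PHI; otherwise return null.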
Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
/// map holds one of four states for each block:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}


/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
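///
/// For example (illustrative): a stored i8* can satisfy an i64 load of the
/// same size via ptrtoint, and a stored i64 can satisfy an i32 load by
/// truncating (after shifting the value down on big-endian targets).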
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
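///
/// For example (illustrative), for 'getelementptr i8* %base, i64 4' this
/// returns %base and adds 4 to Offset.  Bitcasts are looked through, and GEPs
/// are only handled when all of their indices are constant.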
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}


/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.  One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
    << "Base       = " << *StoreBase << "\n"
    << "Store Ptr  = " << *WritePtr << "\n"
    << "Store Offs = " << StoreOffset << "\n"
    << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
    << "Base       = " << *StoreBase << "\n"
    << "Store Ptr  = " << *WritePtr << "\n"
    << "Store Offs = " << StoreOffset << "\n"
    << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getValueOperand()->getType()->isStructTy() ||
      DepSI->getValueOperand()->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize =
    TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}


/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // See if we can constant fold a load from the constant with the offset
  // applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}

namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

}

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  AliasAnalysis::Location Loc(LI->getPointerOperand(),
                         VN.getAliasAnalysis()->getTypeStoreSize(LI->getType()),
                              LI->getMetadata(LLVMContext::MD_tbaa));
  MD->getNonLocalPointerDependency(Loc, true, LI->getParent(),
                                   Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }


  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same
      // as the pointer operand of the load if PHI translation occurs.  Make
      // sure to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                       DepSI->getValueOperand(),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getValueOperand()->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                         S->getValueOperand()));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

1474        // If the stored value is larger or equal to the loaded value, we can
1475        // reuse it.
1476        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
1477          UnavailableBlocks.push_back(DepBB);
1478          continue;
1479        }
1480      }
1481      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
1482      continue;
1483    }
1484
1485    UnavailableBlocks.push_back(DepBB);
1486    continue;
1487  }
1488
  // If none of the dependencies produces a known value for this load, bail
  // out early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
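  // For instance (a sketch with invented names), if one predecessor supplies
  // a stored value %sv and another a prior load %lv, ConstructSSAForLoadSet
  // produces roughly:
  //   %merge = phi i32 [ %sv, %pred1 ], [ %lv, %pred2 ]
  // and the original load is then replaced by %merge.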
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(LI);
    toErase.push_back(LI);
    ++NumGVNLoad;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer not to increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).
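  // Concretely, the shape we are looking for is (a sketch):
  //
  //      Pred1:            Pred2:
  //      %x = load %p        |
  //            \             |
  //             v            v
  //          LoadBB: %y = load %p
  //
  // The load is available via Pred1 but not via Pred2; inserting one copy of
  // it at the end of Pred2 makes it fully redundant in LoadBB, where it can
  // then be replaced by a PHI of %x and the new load.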

  SmallPtrSet<BasicBlock*, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated.  Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting load PRE.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // The load is "hot" if it sits in a loop, which is the case when it
        // dominates the instruction producing its dependent value.
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" loads; we don't want to risk
    // pessimizing the code here.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
    }
  }
  if (!NeedToSplit.empty()) {
    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
    return false;
  }
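  // A critical edge here runs from a multi-successor predecessor into the
  // multi-predecessor LoadBB, e.g. (a sketch):
  //   Pred:  br i1 %c, label %LoadBB, label %Other
  // Inserting the load at the end of Pred would also execute it on the path
  // to %Other, so the edge is recorded in toSplit instead; after it has been
  // split, the next iteration retries PRE with a dedicated block on the edge.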

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary.  The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getPointerOperand(), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this PHI-translated value,
    // we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here.  We have to watch out for:
    //  %1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load %1
    // It is valid to have the getelementptr before the test, even if p can be 0,
    // as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case.  Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors;
  // do it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                  LI->getAlignment(),
                                  UnavailablePred->getTerminator());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  VN.erase(LI);
  toErase.push_back(LI);
  ++NumPRELoad;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // Find the memory operation that this load locally depends on.
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We can eliminate such a load by recognizing that the clobbering store
    // is at an obvious common base + constant offset from the load, and that
    // the store (or memset) completely covers the bytes the load reads.
    // This sort of thing can happen in bitfield access code.
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can forward
    // a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(),
                                            L, *TD);
      }
    }

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
            << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }

    DEBUG(
      // Fast-print the dependence; using operator<< on the load instruction
      // itself would be too slow.
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getValueOperand();

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
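    // For example (a sketch): given the must-aliased pair
    //   store float %f, float* %p
    //   %pi = bitcast float* %p to i32*
    //   %v  = load i32* %pi
    // the i32 load can reuse %f through a float-to-i32 bitcast;
    // CoerceAvailableValueToLoadType builds such casts, consulting
    // TargetData to confirm that the sizes are compatible.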
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (AvailableVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(AvailableVal);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading
  // an undef value.  This can happen when loading from a fresh allocation
  // with no intervening stores, for example.
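  // E.g. (a sketch):
  //   %p = call i8* @malloc(i64 4)
  //   %v = load i8* %p      ; nothing has written to %p, so %v is undef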
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load occurs right after a lifetime begin, then the loaded value
  // is undefined.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }
  }

  return false;
}

Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator VI = Locals->table.find(num);
    if (VI != Locals->table.end())
      return VI->second;
    Locals = Locals->parent;
  }

  return 0;
}


/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      uint32_t Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  uint32_t Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

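    // Record the condition's value along the edges we fully control, e.g.
    // (a sketch): after
    //   br i1 %cmp, label %then, label %else
    // a use of %cmp inside %then can be value-numbered to 'true' and one
    // inside %else to 'false', but only when this block is the successor's
    // sole predecessor; otherwise another path could reach the successor
    // with a different condition value.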
    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());

    return false;

  // Allocations and terminators are always uniquely numbered, so we can save
  // time and memory by failing them fast.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }

  // Collapse PHI nodes.
  if (PHINode *p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && constVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && repl->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function &F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}


bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      // We don't currently value number ANY inline asm calls.
      if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
        if (CallI->isInlineAsm())
          continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Scan the predecessors for PRE opportunities.  We're only trying to
      // solve the basic diamond case, where a value is computed in the
      // successor and one predecessor, but not the other.  We also
      // explicitly disallow cases where the successor is its own
      // predecessor, because they're more complicated to get right.
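      // The target shape is (a sketch):
      //
      //     Pred1: %v1 = add i32 %a, %b     Pred2:
      //             \                        /
      //              v                      v
      //           CurrentBlock: %v2 = add i32 %a, %b
      //
      // %v2 is redundant along the path through Pred1 but not through Pred2;
      // we clone the add into Pred2 and replace %v2 with a PHI of the two
      // copies.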
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        // We're not interested in PRE where the block is its
        // own predecessor, or where a predecessor is unreachable.
        if (P == CurrentBlock || !localAvail.count(P)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
                                            localAvail[P]->table.find(ValNo);
        if (predV == localAvail[P]->table.end()) {
          PREPred = P;
          ++NumWithout;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[P] = predV->second;
          ++NumWith;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      ++NumGVNPRE;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));

      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        Phi->addIncoming(predMap[P], P);
      }

      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && Phi->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}

/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
                   new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }
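  // The scopes built above mirror the dominator tree.  For example (a
  // sketch), given a domtree entry -> A -> B, B's ValueNumberScope chains to
  // A's and then to entry's, so lookupNumber(B, n) also finds values numbered
  // in the dominating blocks A and entry.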

  // Top-down walk of the dominator tree
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
         I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
             II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;
    }
  }
}
