//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> EnableFullLoadPRE("enable-full-load-pre", cl::init(false));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
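///
/// For example, two separate "add i32 %x, %y" instructions build identical
/// Expression keys (same opcode, type, and operand value numbers), so the
/// table maps both of them to the same value number.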
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(Constant* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      unsigned size();
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9));

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

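/// lookup_or_add_call - Value number a call.  Calls that do not access memory
/// are numbered purely by their expression; read-only calls are additionally
/// checked (via MemoryDependenceAnalysis) for an identical dominating call on
/// which they depend; all other calls always receive a fresh value number.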
uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] =  nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumOperands() != C->getNumOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 1; i < C->getNumOperands(); ++i) {
        uint32_t c_vn = lookup_or_add(C->getOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumOperands() != C->getNumOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 1; i < C->getNumOperands(); ++i) {
      uint32_t c_vn = lookup_or_add(C->getOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or :
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
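  // Per-block leader table: each basic block gets a ValueNumberScope mapping
  // value numbers to their leader Value in that block.  The parent pointer
  // links to the scope of the block's immediate dominator, so a lookup can
  // walk up the dominator tree to find a leader that dominates the block.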
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool nopre = false, bool noloads = false)
      : FunctionPass(&ID), NoPRE(nopre), NoLoads(noloads), MD(0) { }

  private:
    bool NoPRE;
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoPRE, bool NoLoads) {
  return new GVN(NoPRE, NoLoads);
}
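
// Example (sketch, not part of this file's logic): clients typically schedule
// the pass through the legacy pass manager, along the lines of
//
//   PassManager PM;
//   PM.add(createGVNPass(/*NoPRE=*/false, /*NoLoads=*/false));
//   PM.run(*M);   // M is some llvm::Module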

static RegisterPass<GVN> X("gvn",
                           "Global Value Numbering");

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
      errs() << I->first << "\n";
      I->second->dump();
  }
  errs() << "}\n";
}

static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully available in FullyAvailableBlocks.
/// This map holds one of four states per block:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}


/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}


/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
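///
/// Illustrative example (assuming a little-endian target): if a 32-bit store
///   store i32 %v, i32* %p
/// feeds a must-aliased 8-bit load, the load's value can be rebuilt as
///   %b = trunc i32 %v to i8
/// after any pointer<->integer casts that the code below inserts.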
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
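///
/// For example, "getelementptr [4 x i32]* @G, i32 0, i32 2" decomposes into
/// base @G plus a byte offset of 8 (assuming 4-byte i32 elements); the clobber
/// analysis below compares such offsets for loads and stores sharing a base.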
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}


/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
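///
/// For example, an 8-byte write at byte offset 0 of some base that fully
/// covers a 4-byte load at byte offset 4 of the same base returns 4 here; the
/// caller then extracts those bytes with GetStoreValueForLoad.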
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.
  if (LoadOffset == StoreOffset) {
#if 0
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
    << "Base       = " << *StoreBase << "\n"
    << "Store Ptr  = " << *WritePtr << "\n"
    << "Store Offs = " << StoreOffset << "\n"
    << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset) {
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  } else {
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
  }
  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
    << "Base       = " << *StoreBase << "\n"
    << "Store Ptr  = " << *WritePtr << "\n"
    << "Store Offs = " << StoreOffset << "\n"
    << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getOperand(0)->getType()->isStructTy() ||
      DepSI->getOperand(0)->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is a memset, we just need to see if the offset is valid within the
  // size of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}


/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8;
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
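///
/// For example, a 4-byte load fully covered by "memset(P, %c, 16)" becomes %c
/// zero-extended to 32 bits and OR-splatted into each byte, while a load
/// covered by a memcpy from a constant global is constant-folded from the
/// global's initializer.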
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
  ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}



struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};
1294
1295/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
1296/// construct SSA form, allowing us to eliminate LI.  This returns the value
1297/// that should be used at LI's definition site.
1298static Value *ConstructSSAForLoadSet(LoadInst *LI,
1299                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
1300                                     const TargetData *TD,
1301                                     const DominatorTree &DT,
1302                                     AliasAnalysis *AA) {
1303  // Check for the fully redundant, dominating load case.  In this case, we can
1304  // just use the dominating value directly.
1305  if (ValuesPerBlock.size() == 1 &&
1306      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
1307    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);
1308
1309  // Otherwise, we have to construct SSA form.
1310  SmallVector<PHINode*, 8> NewPHIs;
1311  SSAUpdater SSAUpdate(&NewPHIs);
1312  SSAUpdate.Initialize(LI);
1313
1314  const Type *LoadTy = LI->getType();
1315
1316  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
1317    const AvailableValueInBlock &AV = ValuesPerBlock[i];
1318    BasicBlock *BB = AV.BB;
1319
1320    if (SSAUpdate.HasValueForBlock(BB))
1321      continue;
1322
1323    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
1324  }
1325
1326  // Perform PHI construction.
1327  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
1328
1329  // If new PHI nodes were created, notify alias analysis.
1330  if (V->getType()->isPointerTy())
1331    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
1332      AA->copyValue(LI, NewPHIs[i]);
1333
1334  return V;
1335}
1336
1337static bool isLifetimeStart(Instruction *Inst) {
1338  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
1339    return II->getIntrinsicID() == Intrinsic::lifetime_start;
1340  return false;
1341}
1342
1343/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
1344/// non-local by performing PHI construction.
1345bool GVN::processNonLocalLoad(LoadInst *LI,
1346                              SmallVectorImpl<Instruction*> &toErase) {
1347  // Find the non-local dependencies of the load.
1348  SmallVector<NonLocalDepResult, 64> Deps;
1349  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
1350                                   Deps);
1351  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
1352  //             << Deps.size() << *LI << '\n');
1353
1354  // If we had to process more than one hundred blocks to find the
1355  // dependencies, this load isn't worth worrying about.  Optimizing
1356  // it will be too expensive.
1357  if (Deps.size() > 100)
1358    return false;
1359
1360  // If we had a phi translation failure, we'll have a single entry which is a
1361  // clobber in the current block.  Reject this early.
1362  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
1363    DEBUG(
1364      dbgs() << "GVN: non-local load ";
1365      WriteAsOperand(dbgs(), LI);
1366      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
1367    );
1368    return false;
1369  }
1370
1371  // Filter out useless results (non-locals, etc).  Keep track of the blocks
1372  // where we have a value available in repl, also keep track of whether we see
1373  // dependencies that produce an unknown value for the load (such as a call
1374  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same as
      // the pointer operand of the load if PHI translation occurs.  Make sure
      // to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from the
      // stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                           DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // The load and store are to the same address but may be of different
      // types; reject reuse of the stored value unless it can be coerced to
      // the type of the load.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the previously loaded value is large enough to cover the value
        // loaded here, we can reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(LI);
    toErase.push_back(LI);
    NumGVNLoad++;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).
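  //
  // For example (an illustrative sketch, not from an actual test case), given
  // a block with two predecessors where only one of them performs the load:
  //
  //   pred1:  %v1 = load i32* %p        pred2:  (no load of %p)
  //                 \                       /
  //   loadbb:            %v = load i32* %p
  //
  // load PRE inserts "%v2 = load i32* %p" at the end of pred2 and rewrites %v
  // as phi i32 [ %v1, %pred1 ], [ %v2, %pred2 ].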

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // If ValuesPerBlock contains LI itself, this means we have a loop where at
  // least one of the available values is LI.  Since this means that we won't
  // be able to eliminate LI even if we insert loads in the other predecessors,
  // we will end up increasing code size.  Reject this by scanning for LI.
  if (!EnableFullLoadPRE) {
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
      if (ValuesPerBlock[i].isSimpleValue() &&
          ValuesPerBlock[i].getSimpleValue() == LI)
        return false;
  }

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // The load is considered "hot" (it sits in some loop), because it
        // dominates the instruction it depends on.
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions. We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

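  // Walk LoadBB's predecessors.  Any predecessor from which the value is not
  // fully available will need a copy of the load inserted; record it in
  // PredLoads (the pointer to load from is filled in below).  A predecessor
  // that reaches LoadBB over a critical edge is queued in toSplit so the edge
  // can be split on a later iteration; indirectbr edges cannot be split, so
  // they defeat PRE of this load entirely.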
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      toSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
      return false;
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");
  if (!EnableFullLoadPRE) {
    // If this load is unavailable in multiple predecessors, reject it.
    // FIXME: If we could restructure the CFG, we could make a common pred with
    // all the preds that don't have an available LI and insert a new load into
    // that one block.
    if (NumUnavailablePreds != 1)
      return false;
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary.  The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getOperand(0), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getOperand(0) << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here.  We have to watch out for:
    //  @1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load @1
    // It is valid to have the getelementptr before the test, even if p can be 0,
    // as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case.  Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors.
  // Do it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                  LI->getAlignment(),
                                  UnavailablePred->getTerminator());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  VN.erase(LI);
  toErase.push_back(LI);
  NumPRELoad++;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // Find the local memory dependency of this load.
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We could do that by recognizing if the clobber instructions are obviously
    // a common base + constant offset, and if the previous store (or memset)
    // completely covers this load.  This sort of thing can happen in bitfield
    // access code.
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can forward
    // a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,*TD);
      }
    }

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
            << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      VN.erase(L);
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }

    DEBUG(
      // fast print dep, using operator<< on instruction would be too slow
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      }
      else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    VN.erase(L);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), L,*TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      }
      else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (DepLI->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(DepLI);
    VN.erase(L);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading from a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    VN.erase(L);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load occurs right after a lifetime begin, then the loaded value
  // is undefined.
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      VN.erase(L);
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }
  }

  return false;
}

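/// lookupNumber - Look up value number 'num' in the scope for BB, walking up
/// the chain of parent scopes (which correspond to BB's dominators) until a
/// value is found.  Returns null if the number is not available on that path.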
Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
    if (I != Locals->table.end())
      return I->second;
    Locals = Locals->parent;
  }

  return 0;
}


/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

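    // Propagate the branch condition into the successors: in a successor that
    // can only be reached from this conditional branch, the condition is known
    // to be true (respectively false), so record that constant under the
    // condition's value number for use by dominated blocks.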
    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(TrueSucc->getContext());

    return false;

  // Allocations (and terminators) are always uniquely numbered, so we can save
  // time and memory by fast-failing them.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }

  // Collapse PHI nodes
  if (PHINode* p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && constVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && repl->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) NumGVNBlocks++;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}


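/// processBlock - Run processInstruction over every instruction in BB, erasing
/// any instructions it queues in toErase as we go.  Returns true if the block
/// was changed.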
bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by erasing instructions eagerly in a helper
  // function (and incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVector<Instruction*, 8>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look for the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
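      //
      // For example (an illustrative sketch of the diamond shape):
      //
      //        pred1:  %v1 = add i32 %a, %b      pred2:  (not computed)
      //                       \                     /
      //        CurrentBlock:      %v2 = add i32 %a, %b
      //
      // PRE clones the computation into pred2 and replaces %v2 with a PHI of
      // the value coming from each predecessor.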
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (*PI == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(*PI))  {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
                                            localAvail[*PI]->table.find(ValNo);
        if (predV == localAvail[*PI]->table.end()) {
          PREPred = *PI;
          NumWithout++;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[*PI] = predV->second;
          NumWith++;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        delete PREInstr;
        DEBUG(verifyRemoved(PREInstr));
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      NumGVNPRE++;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));

      // Create a PHI to make the value available in this block.
      PHINode* Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI)
        Phi->addIncoming(predMap[*PI], *PI);

      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && Phi->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD)
    MD->invalidateCachedPredecessors();
  return true;
}

/// iterateOnFunction - Executes one iteration of GVN
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

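  // Create a value-number scope for every basic block, chaining each scope to
  // the scope of the block's immediate dominator.  lookupNumber walks this
  // chain to find values made available by dominating blocks.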
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
                   new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }

  // Top-down walk of the dominator tree
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
         I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
             II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;
    }
  }
}