InstructionCombining.cpp revision a311c34d2af7c750f016ef5e4c41bee77a1dfac7
//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist-driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
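//
//    For example, canonicalization 6 rewrites:
//       %Y = mul i32 %X, 8
//    into:
//       %Y = shl i32 %X, 3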
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Scalar.h"
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm-c/Initialization.h"
#include <algorithm>
#include <climits>
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumExpand,    "Number of expansions");
STATISTIC(NumFactor   , "Number of factorizations");
STATISTIC(NumReassoc  , "Number of reassociations");

// Initialization Routines
void llvm::initializeInstCombine(PassRegistry &Registry) {
  initializeInstCombinerPass(Registry);
}

void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
  initializeInstCombine(*unwrap(R));
}

char InstCombiner::ID = 0;
INITIALIZE_PASS(InstCombiner, "instcombine",
                "Combine redundant instructions", false, false)

void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
}


/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'.  We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
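/// For example, with 64-bit legal integers this permits i160 -> i64 but
/// rejects i64 -> i160 (a sketch; actual legality comes from TargetData).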
bool InstCombiner::ShouldChangeType(const Type *From, const Type *To) const {
  assert(From->isIntegerTy() && To->isIntegerTy());

  // If we don't have TD, we don't know if the source/dest are legal.
  if (!TD) return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  bool FromLegal = TD->isLegalInteger(FromWidth);
  bool ToLegal = TD->isLegalInteger(ToWidth);

  // If this is a legal integer from type, and the result would be an illegal
  // type, don't do the transformation.
  if (FromLegal && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}


/// SimplifyAssociativeOrCommutative - This performs a few simplifications for
/// operators which are associative or commutative:
//
//  Commutative operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  Associative operators:
//
//  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
//  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
//
//  Associative and commutative operators:
//
//  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
//  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
//  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
//     if C1 and C2 are constants.
//
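//  For example, transform 2 turns "(A + 10) + 30" into "A + 40": here
//  "B op C" is "10 + 30", which constant-folds (an illustrative sketch of
//  one rule, not an exhaustive description).
//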
bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex).  This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
        getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) {
          // It simplifies to V.  Form "A op V".
          I.setOperand(0, A);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) {
          // It simplifies to V.  Form "V op C".
          I.setOperand(0, V);
          I.setOperand(1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
          // It simplifies to V.  Form "V op B".
          I.setOperand(0, V);
          I.setOperand(1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
          // It simplifies to V.  Form "B op V".
          I.setOperand(0, B);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          isa<Constant>(Op0->getOperand(1)) &&
          isa<Constant>(Op1->getOperand(1)) &&
          Op0->hasOneUse() && Op1->hasOneUse()) {
        Value *A = Op0->getOperand(0);
        Constant *C1 = cast<Constant>(Op0->getOperand(1));
        Value *B = Op1->getOperand(0);
        Constant *C2 = cast<Constant>(Op1->getOperand(1));

        Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
        Instruction *New = BinaryOperator::Create(Opcode, A, B);
        InsertNewInstWith(New, I);
        New->takeName(Op1);
        I.setOperand(0, New);
        I.setOperand(1, Folded);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        I.clearSubclassOptionalData();
        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (1);
}

/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  switch (LOp) {
  default:
    return false;

  case Instruction::And:
    // And distributes over Or and Xor.
    switch (ROp) {
    default:
      return false;
    case Instruction::Or:
    case Instruction::Xor:
      return true;
    }

  case Instruction::Mul:
    // Multiplication distributes over addition and subtraction.
    switch (ROp) {
    default:
      return false;
    case Instruction::Add:
    case Instruction::Sub:
      return true;
    }

  case Instruction::Or:
    // Or distributes over And.
    switch (ROp) {
    default:
      return false;
    case Instruction::And:
      return true;
    }
  }
}

/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return LeftDistributesOverRight(ROp, LOp);
  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
  return false;
}

/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
/// which some other binary operation distributes over either by factorizing
/// out common terms (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if
/// this results in simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if
/// this is a win).  Returns the simplified value, or null if it didn't
/// simplify.
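/// For example, "(X*4)+(X*5)" factorizes to "X*9": the common term X is
/// pulled out and "4+5" constant-folds to 9 (an illustrative sketch).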
Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op

  // Factorization.
  if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) {
    // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
    // a common term.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
    Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Does "X op' Y" always equal "Y op' X"?
    bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

    // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
    if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
      // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
      // commutative case, "(A op' B) op (C op' A)"?
      if (A == C || (InnerCommutative && A == D)) {
        if (A != C)
          std::swap(C, D);
        // Consider forming "A op' (B op D)".
        // If "B op D" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD);
        // If "B op D" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName());
        if (V) {
          ++NumFactor;
          V = Builder->CreateBinOp(InnerOpcode, A, V);
          V->takeName(&I);
          return V;
        }
      }

    // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
    if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
      // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
      // commutative case, "(A op' B) op (B op' D)"?
      if (B == D || (InnerCommutative && B == C)) {
        if (B != D)
          std::swap(C, D);
        // Consider forming "(A op C) op' B".
        // If "A op C" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD);
        // If "A op C" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName());
        if (V) {
          ++NumFactor;
          V = Builder->CreateBinOp(InnerOpcode, V, B);
          V->takeName(&I);
          return V;
        }
      }
  }

  // Expansion.
  if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C".  See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Do "A op C" and "B op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) {
        // They do! Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
        if ((L == A && R == B) ||
            (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
          return Op0;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
          return V;
        // Otherwise, create a new instruction.
        C = Builder->CreateBinOp(InnerOpcode, L, R);
        C->takeName(&I);
        return C;
      }
  }

  if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)".  See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Do "A op B" and "A op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) {
        // They do! Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
        if ((L == B && R == C) ||
            (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
          return Op1;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
          return V;
        // Otherwise, create a new instruction.
        A = Builder->CreateBinOp(InnerOpcode, L, R);
        A->takeName(&I);
        return A;
      }
  }

  return 0;
}

// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
// if the LHS is a constant zero (which is the 'negate' form).
//
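// For example, given "%N = sub i32 0, %X" this returns %X; for a foldable
// constant it returns the negated constant.
//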
Value *InstCombiner::dyn_castNegVal(Value *V) const {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  return 0;
}

// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
Value *InstCombiner::dyn_castFNegVal(Value *V) const {
  if (BinaryOperator::isFNeg(V))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isFloatingPointTy())
      return ConstantExpr::getFNeg(C);

  return 0;
}

static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  if (CastInst *CI = dyn_cast<CastInst>(&I)) {
    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
  }

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
    return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                    SO->getName()+".op");
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}

// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments.  This also works for Cast instructions, which obviously do
// not have a second operand.
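//
// For example, "add (select i1 %C, i32 1, i32 2), 10" can fold to
// "select i1 %C, i32 11, i32 12" (a sketch: the operation is applied to
// each constant select arm separately).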
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
  // Don't modify shared select instructions
  if (!SI->hasOneUse()) return 0;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType()->isIntegerTy(1)) return 0;

    // If it's a bitcast involving vectors, make sure it has the same number of
    // elements on both sides.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
      const VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
      const VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());

      // Verify that either both or neither are vectors.
      if ((SrcTy == NULL) != (DestTy == NULL)) return 0;
      // If vectors, verify that they have the same number of elements.
      if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
        return 0;
    }

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);

    return SelectInst::Create(SI->getCondition(),
                              SelectTrueVal, SelectFalseVal);
  }
  return 0;
}


/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
/// has a PHI node as operand #0, see if we can fold the instruction into the
/// PHI (which is only possible if all operands to the PHI are constants).
///
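/// For example, if %phi merges the constants 1 and 2, then
/// "add i32 %phi, 5" becomes a phi merging 6 and 7 (an illustrative
/// sketch).
///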
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return 0;

  // We normally only transform phis with a single use.  However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of the
  // uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      Instruction *User = cast<Instruction>(*UI);
      if (User != &I && !I.isIdenticalTo(User))
        return 0;
    }
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = 0;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
      continue;

    if (isa<PHINode>(InVal)) return 0;  // Itself a phi.
    if (NonConstBB) return 0;  // More than one non-const value.

    NonConstBB = PN->getIncomingBlock(i);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
      if (II->getParent() == NonConstBB)
        return 0;

    // If the incoming non-constant value is in I's block, we will remove one
    // instruction, but insert another equivalent one, leading to infinite
    // instcombine.
    if (NonConstBB == I.getParent())
      return 0;
  }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB != 0) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return 0;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // If we are going to have to insert a new computation, do so right before the
  // predecessor's terminator.
  if (NonConstBB)
    Builder->SetInsertPoint(NonConstBB->getTerminator());

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a phi,
    // not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
      else
        InV = Builder->CreateSelect(PN->getIncomingValue(i),
                                    TrueVInPred, FalseVInPred, "phitmp");
      NewPN->addIncoming(InV, ThisBB);
    }
  } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
      else if (isa<ICmpInst>(CI))
        InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      else
        InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::get(I.getOpcode(), InC, C);
      else
        InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                   PN->getIncomingValue(i), C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    const Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      else
        InV = Builder->CreateCast(CI->getOpcode(),
                                PN->getIncomingValue(i), I.getType(), "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }

  for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
       UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    if (User == &I) continue;
    ReplaceInstUsesWith(*User, NewPN);
    EraseInstFromFunction(*User);
  }
  return ReplaceInstUsesWith(I, NewPN);
}

/// FindElementAtOffset - Given a type and a constant offset, determine whether
/// or not there is a sequence of GEP indices into the type that will land us at
/// the specified offset.  If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
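/// For example, for the type { i32, i32 } and Offset 4 (assuming a 4-byte
/// i32 with no padding), the indices are [0, 1] and the resultant element
/// type is i32.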
const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset,
                                          SmallVectorImpl<Value*> &NewIndices) {
  if (!TD) return 0;
  if (!Ty->isSized()) return 0;

  // Start with the index over the outer type.  Note that the type size
  // might be zero (even if the offset isn't zero) if the indexed type
  // is something like [0 x {int, int}]
  const Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
  int64_t FirstIdx = 0;
  if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
    FirstIdx = Offset/TySize;
    Offset -= FirstIdx*TySize;

    // Handle hosts where % returns negative instead of values [0..TySize).
    if (Offset < 0) {
      --FirstIdx;
      Offset += TySize;
      assert(Offset >= 0);
    }
    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
  }

  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

  // Index into the types.  If we fail, return null.
  while (Offset) {
    // Indexing into tail padding between struct/array elements.
    if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
      return 0;

    if (const StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = TD->getStructLayout(STy);
      assert(Offset < (int64_t)SL->getSizeInBytes() &&
             "Offset must stay within the indexed type");

      unsigned Elt = SL->getElementContainingOffset(Offset);
      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                            Elt));

      Offset -= SL->getElementOffset(Elt);
      Ty = STy->getElementType(Elt);
    } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
      uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(ConstantInt::get(IntPtrTy, Offset/EltSize));
      Offset %= EltSize;
      Ty = AT->getElementType();
    } else {
      // Otherwise, we can't index into the middle of this atomic type, bail.
      return 0;
    }
  }

  return Ty;
}



Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());

  if (Value *V = SimplifyGEPInst(&Ops[0], Ops.size(), TD))
    return ReplaceInstUsesWith(GEP, V);

  Value *PtrOp = GEP.getOperand(0);

  // Eliminate unneeded casts for indices, and replace indices which displace
  // by multiples of a zero size type with zero.
  if (TD) {
    bool MadeChange = false;
    const Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());

    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
         I != E; ++I, ++GTI) {
      // Skip indices into struct types.
      const SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
      if (!SeqTy) continue;

      // If the element type has zero size then any index over it is equivalent
      // to an index of zero, so replace it with zero if it is not zero already.
      if (SeqTy->getElementType()->isSized() &&
          TD->getTypeAllocSize(SeqTy->getElementType()) == 0)
        if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
          *I = Constant::getNullValue(IntPtrTy);
          MadeChange = true;
        }

      if ((*I)->getType() != IntPtrTy) {
        // If we are using a wider index than needed for this platform, shrink
        // it to what we need.  If narrower, sign-extend it to what we need.
        // This explicit cast can make subsequent optimizations more obvious.
        *I = Builder->CreateIntCast(*I, IntPtrTy, true);
        MadeChange = true;
      }
    }
    if (MadeChange) return &GEP;
  }

  // Combine Indices - If the source pointer to this getelementptr instruction
  // is a getelementptr instruction, combine the indices of the two
  // getelementptr instructions into a single instruction.
  //
  if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
    // Note that if our source is a gep chain itself that we wait for that
    // chain to be resolved before we perform this transformation.  This
    // avoids us creating a TON of code in some cases.
    //
    if (GetElementPtrInst *SrcGEP =
          dyn_cast<GetElementPtrInst>(Src->getOperand(0)))
      if (SrcGEP->getNumOperands() == 2)
        return 0;   // Wait until our source is folded to completion.

    SmallVector<Value*, 8> Indices;

    // Find out whether the last index in the source GEP is a sequential idx.
    bool EndsWithSequential = false;
    for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
         I != E; ++I)
      EndsWithSequential = !(*I)->isStructTy();

    // Can we combine the two pointer arithmetics offsets?
    if (EndsWithSequential) {
      // Replace: gep (gep %P, long B), long A, ...
      // With:    T = long A+B; gep %P, T, ...
      //
      Value *Sum;
      Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
      Value *GO1 = GEP.getOperand(1);
      if (SO1 == Constant::getNullValue(SO1->getType())) {
        Sum = GO1;
      } else if (GO1 == Constant::getNullValue(GO1->getType())) {
        Sum = SO1;
      } else {
        // If they aren't the same type, then the input hasn't been processed
        // by the loop above yet (which canonicalizes sequential index types to
        // intptr_t).  Just avoid transforming this until the input has been
        // normalized.
        if (SO1->getType() != GO1->getType())
          return 0;
        Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
      }

      // Update the GEP in place if possible.
      if (Src->getNumOperands() == 2) {
        GEP.setOperand(0, Src->getOperand(0));
        GEP.setOperand(1, Sum);
        return &GEP;
      }
      Indices.append(Src->op_begin()+1, Src->op_end()-1);
      Indices.push_back(Sum);
      Indices.append(GEP.op_begin()+2, GEP.op_end());
    } else if (isa<Constant>(*GEP.idx_begin()) &&
               cast<Constant>(*GEP.idx_begin())->isNullValue() &&
               Src->getNumOperands() != 1) {
      // Otherwise we can do the fold if the first index of the GEP is a zero
      Indices.append(Src->op_begin()+1, Src->op_end());
      Indices.append(GEP.idx_begin()+1, GEP.idx_end());
    }

    if (!Indices.empty())
      return (GEP.isInBounds() && Src->isInBounds()) ?
        GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices.begin(),
                                          Indices.end(), GEP.getName()) :
        GetElementPtrInst::Create(Src->getOperand(0), Indices.begin(),
                                  Indices.end(), GEP.getName());
  }

  // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
  Value *StrippedPtr = PtrOp->stripPointerCasts();
  const PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType());
  if (StrippedPtr != PtrOp &&
      StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {

    bool HasZeroPointerIndex = false;
    if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
      HasZeroPointerIndex = C->isZero();

    // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
    // into     : GEP [10 x i8]* X, i32 0, ...
    //
    // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
    //           into     : GEP i8* X, ...
    //
    // This occurs when the program declares an array extern like "int X[];"
    if (HasZeroPointerIndex) {
      const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
      if (const ArrayType *CATy =
          dyn_cast<ArrayType>(CPTy->getElementType())) {
        // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
        if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
          // -> GEP i8* X, ...
          SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
          GetElementPtrInst *Res =
            GetElementPtrInst::Create(StrippedPtr, Idx.begin(),
                                      Idx.end(), GEP.getName());
          Res->setIsInBounds(GEP.isInBounds());
          return Res;
        }

        if (const ArrayType *XATy =
              dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
          // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
          if (CATy->getElementType() == XATy->getElementType()) {
            // -> GEP [10 x i8]* X, i32 0, ...
            // At this point, we know that the cast source type is a pointer
            // to an array of the same type as the destination pointer
            // array.  Because the array type is never stepped over (there
            // is a leading zero) we can fold the cast into this GEP.
            GEP.setOperand(0, StrippedPtr);
            return &GEP;
          }
        }
      }
    } else if (GEP.getNumOperands() == 2) {
      // Transform things like:
      // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
      // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
      const Type *SrcElTy = StrippedPtrTy->getElementType();
      const Type *ResElTy = cast<PointerType>(PtrOp->getType())->getElementType();
      if (TD && SrcElTy->isArrayTy() &&
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
          TD->getTypeAllocSize(ResElTy)) {
        Value *Idx[2];
        Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
        Idx[1] = GEP.getOperand(1);
        Value *NewGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()) :
          Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName());
        // V and GEP are both pointer types --> BitCast
        return new BitCastInst(NewGEP, GEP.getType());
      }

      // Transform things like:
      // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
      //   (where tmp = 8*tmp2) into:
      // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast

      if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
        uint64_t ArrayEltSize =
            TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());

        // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
        // allow either a mul, shift, or constant here.
        Value *NewIdx = 0;
        ConstantInt *Scale = 0;
        if (ArrayEltSize == 1) {
          NewIdx = GEP.getOperand(1);
          Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
        } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
          NewIdx = ConstantInt::get(CI->getType(), 1);
          Scale = CI;
        } else if (Instruction *Inst = dyn_cast<Instruction>(GEP.getOperand(1))) {
          if (Inst->getOpcode() == Instruction::Shl &&
              isa<ConstantInt>(Inst->getOperand(1))) {
            ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
            uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
            Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
                                     1ULL << ShAmtVal);
            NewIdx = Inst->getOperand(0);
          } else if (Inst->getOpcode() == Instruction::Mul &&
                     isa<ConstantInt>(Inst->getOperand(1))) {
            Scale = cast<ConstantInt>(Inst->getOperand(1));
            NewIdx = Inst->getOperand(0);
          }
        }

        // If the index will be to exactly the right offset with the scale taken
        // out, perform the transformation. Note, we don't know whether Scale is
        // signed or not. We'll use unsigned version of division/modulo
        // operation after making sure Scale doesn't have the sign bit set.
        if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
            Scale->getZExtValue() % ArrayEltSize == 0) {
          Scale = ConstantInt::get(Scale->getType(),
                                   Scale->getZExtValue() / ArrayEltSize);
          if (Scale->getZExtValue() != 1) {
            Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
                                                       false /*ZExt*/);
            NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
          }

          // Insert the new GEP instruction.
          Value *Idx[2];
          Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
          Idx[1] = NewIdx;
          Value *NewGEP = GEP.isInBounds() ?
            Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()) :
            Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName());
          // The NewGEP must be pointer typed, so must the old one -> BitCast
          return new BitCastInst(NewGEP, GEP.getType());
        }
      }
    }
  }

  /// See if we can simplify:
  ///   X = bitcast A* to B*
  ///   Y = gep X, <...constant indices...>
  /// into a gep of the original struct.  This is important for SROA and alias
  /// analysis of unions.  If "A" is also a bitcast, wait for A/X to be merged.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
    if (TD &&
        !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices() &&
        StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {

      // Determine how much the GEP moves the pointer.  We are guaranteed to get
      // a constant back from EmitGEPOffset.
      ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP));
      int64_t Offset = OffsetV->getSExtValue();

      // If this GEP instruction doesn't move the pointer, just replace the GEP
      // with a bitcast of the real input to the dest type.
      if (Offset == 0) {
        // If the bitcast is of an allocation, and the allocation will be
        // converted to match the type of the cast, don't touch this.
        if (isa<AllocaInst>(BCI->getOperand(0)) ||
            isMalloc(BCI->getOperand(0))) {
          // See if the bitcast simplifies, if so, don't nuke this GEP yet.
          if (Instruction *I = visitBitCast(*BCI)) {
            if (I != BCI) {
              I->takeName(BCI);
              BCI->getParent()->getInstList().insert(BCI, I);
              ReplaceInstUsesWith(*BCI, I);
            }
            return &GEP;
          }
        }
        return new BitCastInst(BCI->getOperand(0), GEP.getType());
      }

      // Otherwise, if the offset is non-zero, we need to find out if there is a
      // field at Offset in 'A's type.  If so, we can pull the cast through the
      // GEP.
      SmallVector<Value*, 8> NewIndices;
      const Type *InTy =
        cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
      if (FindElementAtOffset(InTy, Offset, NewIndices)) {
        Value *NGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices.begin(),
                                     NewIndices.end()) :
          Builder->CreateGEP(BCI->getOperand(0), NewIndices.begin(),
                             NewIndices.end());

        if (NGEP->getType() == GEP.getType())
          return ReplaceInstUsesWith(GEP, NGEP);
        NGEP->takeName(&GEP);
        return new BitCastInst(NGEP, GEP.getType());
      }
    }
  }

  return 0;
}



static bool IsOnlyNullComparedAndFreed(const Value &V) {
  for (Value::const_use_iterator UI = V.use_begin(), UE = V.use_end();
       UI != UE; ++UI) {
    const User *U = *UI;
    if (isFreeCall(U))
      continue;
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(U))
      if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1)))
        continue;
    return false;
  }
  return true;
}

Instruction *InstCombiner::visitMalloc(Instruction &MI) {
  // If we have a malloc call which is only used in any amount of comparisons
  // to null and free calls, delete the calls and replace the comparisons with
  // true or false as appropriate.
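  //
  // For example, if "%m = call i8* @malloc(i64 4)" is used only by
  // "%c = icmp eq i8* %m, null" and a call to free, then %c folds to false
  // and both calls are deleted (an illustrative sketch; the allocation is
  // unobservable).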
  if (IsOnlyNullComparedAndFreed(MI)) {
    for (Value::use_iterator UI = MI.use_begin(), UE = MI.use_end();
         UI != UE;) {
      // We can assume that every remaining use is a free call or an icmp eq/ne
      // to null, so the cast is safe.
      Instruction *I = cast<Instruction>(*UI);

      // Early increment here, as we're about to get rid of the user.
      ++UI;

      if (isFreeCall(I)) {
        EraseInstFromFunction(*cast<CallInst>(I));
        continue;
      }
      // Again, the cast is safe.
      ICmpInst *C = cast<ICmpInst>(I);
      ReplaceInstUsesWith(*C, ConstantInt::get(Type::getInt1Ty(C->getContext()),
                                               C->isFalseWhenEqual()));
      EraseInstFromFunction(*C);
    }
    return EraseInstFromFunction(MI);
  }
  return 0;
}



Instruction *InstCombiner::visitFree(CallInst &FI) {
  Value *Op = FI.getArgOperand(0);

  // free undef -> unreachable.
  if (isa<UndefValue>(Op)) {
    // Insert a new store to null because we cannot modify the CFG here.
    Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
                         UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
    return EraseInstFromFunction(FI);
  }

  // If we have 'free null' delete the instruction.  This can happen in stl code
  // when lots of inlining happens.
  if (isa<ConstantPointerNull>(Op))
    return EraseInstFromFunction(FI);

  return 0;
}



Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
  // Change br (not X), label True, label False to: br X, label False, True
  Value *X = 0;
  BasicBlock *TrueDest;
  BasicBlock *FalseDest;
  if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
      !isa<Constant>(X)) {
    // Swap Destinations and condition...
    BI.setCondition(X);
    BI.setSuccessor(0, FalseDest);
    BI.setSuccessor(1, TrueDest);
    return &BI;
  }

  // Canonicalize fcmp_one -> fcmp_oeq
  FCmpInst::Predicate FPred; Value *Y;
  if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
                             TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
        FPred == FCmpInst::FCMP_OGE) {
      FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
      Cond->setPredicate(FCmpInst::getInversePredicate(FPred));

      // Swap Destinations and condition.
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);
      Worklist.Add(Cond);
      return &BI;
    }

  // Canonicalize icmp_ne -> icmp_eq
  ICmpInst::Predicate IPred;
  if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (IPred == ICmpInst::ICMP_NE  || IPred == ICmpInst::ICMP_ULE ||
        IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
        IPred == ICmpInst::ICMP_SGE) {
      ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
      Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
      // Swap Destinations and condition.
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);
      Worklist.Add(Cond);
      return &BI;
    }

  return 0;
}

Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
  Value *Cond = SI.getCondition();
  if (Instruction *I = dyn_cast<Instruction>(Cond)) {
    if (I->getOpcode() == Instruction::Add)
      if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // change 'switch (X+4) case 1:' into 'switch (X) case -3'
        for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
          SI.setOperand(i,
                   ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
                                                AddRHS));
        SI.setOperand(0, I->getOperand(0));
        Worklist.Add(I);
        return &SI;
      }
  }
  return 0;
}

Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
  Value *Agg = EV.getAggregateOperand();

  if (!EV.hasIndices())
    return ReplaceInstUsesWith(EV, Agg);

  if (Constant *C = dyn_cast<Constant>(Agg)) {
    if (isa<UndefValue>(C))
      return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));

    if (isa<ConstantAggregateZero>(C))
      return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));

    if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
      // Extract the element indexed by the first index out of the constant
      Value *V = C->getOperand(*EV.idx_begin());
      if (EV.getNumIndices() > 1)
        // Extract the remaining indices out of the constant indexed by the
        // first index
        return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end());
      else
        return ReplaceInstUsesWith(EV, V);
    }
    return 0; // Can't handle other constants
  }
  if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
    // We're extracting from an insertvalue instruction, compare the indices
    const unsigned *exti, *exte, *insi, *inse;
    for (exti = EV.idx_begin(), insi = IV->idx_begin(),
         exte = EV.idx_end(), inse = IV->idx_end();
         exti != exte && insi != inse;
         ++exti, ++insi) {
      if (*insi != *exti)
        // The insert and extract both reference distinctly different elements.
        // This means the extract is not influenced by the insert, and we can
        // replace the aggregate operand of the extract with the aggregate
        // operand of the insert. i.e., replace
        // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
        // %E = extractvalue { i32, { i32 } } %I, 0
        // with
        // %E = extractvalue { i32, { i32 } } %A, 0
        return ExtractValueInst::Create(IV->getAggregateOperand(),
                                        EV.idx_begin(), EV.idx_end());
    }
    if (exti == exte && insi == inse)
      // Both iterators are at the end: Index lists are identical. Replace
      // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %C = extractvalue { i32, { i32 } } %B, 1, 0
      // with "i32 42"
      return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
    if (exti == exte) {
      // The extract list is a prefix of the insert list. i.e. replace
      // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %E = extractvalue { i32, { i32 } } %I, 1
      // with
      // %X = extractvalue { i32, { i32 } } %A, 1
      // %E = insertvalue { i32 } %X, i32 42, 0
      // by switching the order of the insert and extract (though the
      // insertvalue should be left in, since it may have other uses).
      Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
                                                 EV.idx_begin(), EV.idx_end());
      return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
                                     insi, inse);
    }
    if (insi == inse)
      // The insert list is a prefix of the extract list
      // We can simply remove the common indices from the extract and make it
      // operate on the inserted value instead of the insertvalue result.
      // i.e., replace
      // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
      // %E = extractvalue { i32, { i32 } } %I, 1, 0
      // with
      // %E = extractvalue { i32 } { i32 42 }, 0
1248      return ExtractValueInst::Create(IV->getInsertedValueOperand(),
1249                                      exti, exte);
1250  }
1251  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
1252    // We're extracting from an intrinsic, see if we're the only user, which
1253    // allows us to simplify multiple result intrinsics to simpler things that
1254    // just get one value.
1255    if (II->hasOneUse()) {
1256      // Check if we're grabbing the overflow bit or the result of a 'with
1257      // overflow' intrinsic.  If it's the latter we can remove the intrinsic
1258      // and replace it with a traditional binary instruction.
1259      switch (II->getIntrinsicID()) {
1260      case Intrinsic::uadd_with_overflow:
1261      case Intrinsic::sadd_with_overflow:
1262        if (*EV.idx_begin() == 0) {  // Normal result.
1263          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
1264          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
1265          EraseInstFromFunction(*II);
1266          return BinaryOperator::CreateAdd(LHS, RHS);
1267        }
1268
1269        // If the normal result of the add is dead, and the RHS is a constant,
1270        // we can transform this into a range comparison.
1271        // overflow = uadd a, -4  -->  overflow = icmp ugt a, 3
1272        if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
1273          if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
1274            return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
1275                                ConstantExpr::getNot(CI));
1276        break;
1277      case Intrinsic::usub_with_overflow:
1278      case Intrinsic::ssub_with_overflow:
1279        if (*EV.idx_begin() == 0) {  // Normal result.
1280          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
1281          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
1282          EraseInstFromFunction(*II);
1283          return BinaryOperator::CreateSub(LHS, RHS);
1284        }
1285        break;
1286      case Intrinsic::umul_with_overflow:
1287      case Intrinsic::smul_with_overflow:
1288        if (*EV.idx_begin() == 0) {  // Normal result.
1289          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
1290          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
1291          EraseInstFromFunction(*II);
1292          return BinaryOperator::CreateMul(LHS, RHS);
1293        }
1294        break;
1295      default:
1296        break;
1297      }
1298    }
1299  }
1300  if (LoadInst *L = dyn_cast<LoadInst>(Agg))
1301    // If the (non-volatile) load only has one use, we can rewrite this to a
1302    // load from a GEP. This reduces the size of the load.
1303    // FIXME: If a load is used only by extractvalue instructions then this
1304    //        could be done regardless of having multiple uses.
    if (!L->isVolatile() && L->hasOneUse()) {
      // extractvalue has integer indices, getelementptr has Value*s. Convert.
      SmallVector<Value*, 4> Indices;
      // Prefix an i32 0 since we need the first element.
      Indices.push_back(Builder->getInt32(0));
      for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
           I != E; ++I)
        Indices.push_back(Builder->getInt32(*I));

      // We need to insert these at the location of the old load, not at that of
      // the extractvalue.
      Builder->SetInsertPoint(L->getParent(), L);
      Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(),
                                              Indices.begin(), Indices.end());
      // Returning the load directly will cause the main loop to insert it in
      // the wrong spot, so use ReplaceInstUsesWith().
      return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP));
    }
  // We could simplify extracts from other values. Note that nested extracts may
  // already be simplified implicitly by the above: extract (extract (insert) )
  // will be translated into extract ( insert ( extract ) ) first and then just
  // the value inserted, if appropriate. Similarly for extracts from single-use
  // loads: extract (extract (load)) will be translated to extract (load (gep))
  // and if again single-use then via load (gep (gep)) to load (gep).
  // However, double extracts from e.g. function arguments or return values
  // aren't handled yet.
  return 0;
}

/// TryToSinkInstruction - Try to move the specified instruction from its
/// current block into the beginning of DestBlock, which can only happen if it's
/// safe to move the instruction past all of the instructions between it and the
/// end of its block.
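/// For example, an add whose single use sits in a sole-predecessor successor
/// block can simply be moved into that block, shrinking its live range.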
static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
  assert(I->hasOneUse() && "Invariants didn't hold!");

  // Cannot sink PHI nodes, terminators, or anything that may have side
  // effects (volatile loads/stores, vaarg, calls, etc.).
  if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I))
    return false;

  // Do not sink alloca instructions out of the entry block.
  if (isa<AllocaInst>(I) && I->getParent() ==
        &DestBlock->getParent()->getEntryBlock())
    return false;

  // We can only sink load instructions if there is nothing between the load and
  // the end of its block that could change the loaded value.
  if (I->mayReadFromMemory()) {
    for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
         Scan != E; ++Scan)
      if (Scan->mayWriteToMemory())
        return false;
  }

  BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI();
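  // getFirstNonPHI skips DestBlock's leading PHI nodes, so I lands after them
  // but before every other instruction in the block.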

  I->moveBefore(InsertPos);
  ++NumSunkInst;
  return true;
}


/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
/// all reachable code to the worklist.
///
/// This has a couple of tricks to make the code faster and more powerful.  In
/// particular, we constant fold and DCE instructions as we go, to avoid adding
/// them to the worklist (this significantly speeds up instcombine on code where
/// many instructions are dead or constant).  Additionally, if we find a branch
/// whose condition is a known constant, we only visit the reachable successors.
///
static bool AddReachableCodeToWorklist(BasicBlock *BB,
                                       SmallPtrSet<BasicBlock*, 64> &Visited,
                                       InstCombiner &IC,
                                       const TargetData *TD) {
  bool MadeIRChange = false;
  SmallVector<BasicBlock*, 256> Worklist;
  Worklist.push_back(BB);

  SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
  DenseMap<ConstantExpr*, Constant*> FoldedConstants;

  do {
    BB = Worklist.pop_back_val();

    // Mark this block visited; if we have already been here, ignore it.
    if (!Visited.insert(BB)) continue;

    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
      Instruction *Inst = BBI++;
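      // (BBI was advanced above, so it stays valid even if Inst is erased
      // below.)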

      // DCE instruction if trivially dead.
      if (isInstructionTriviallyDead(Inst)) {
        ++NumDeadInst;
        DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
        Inst->eraseFromParent();
        continue;
      }

      // ConstantProp instruction if trivially constant.
      if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
        if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
          DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
                       << *Inst << '\n');
          Inst->replaceAllUsesWith(C);
          ++NumConstProp;
          Inst->eraseFromParent();
          continue;
        }

      if (TD) {
        // See if we can constant fold its operands.
        for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
             i != e; ++i) {
          ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
          if (CE == 0) continue;

          Constant*& FoldRes = FoldedConstants[CE];
          if (!FoldRes)
            FoldRes = ConstantFoldConstantExpression(CE, TD);
          if (!FoldRes)
            FoldRes = CE;

          if (FoldRes != CE) {
            *i = FoldRes;
            MadeIRChange = true;
          }
        }
      }

      InstrsForInstCombineWorklist.push_back(Inst);
    }

    // Recursively visit successors.  If this is a branch or switch on a
    // constant, only visit the reachable successor.
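    // For example, "br i1 true, label %T, label %F" reaches only %T; if %F has
    // no other predecessors it is left for the unreachable-block cleanup in
    // DoOneIteration.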
    TerminatorInst *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
        Worklist.push_back(ReachableBB);
        continue;
      }
    }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        // See if the condition matches an explicit case value; if not, the
        // default destination (successor 0) is the only reachable one.
        BasicBlock *ReachableBB = SI->getSuccessor(0);
        for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
          if (SI->getCaseValue(i) == Cond) {
            ReachableBB = SI->getSuccessor(i);
            break;
          }
        Worklist.push_back(ReachableBB);
        continue;
      }
    }

    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      Worklist.push_back(TI->getSuccessor(i));
  } while (!Worklist.empty());

  // Once we've found all of the instructions to add to instcombine's worklist,
  // add them in reverse order.  This way instcombine will visit from the top
  // of the function down.  This jibes well with the way that it adds all uses
  // of instructions to the worklist after doing a transformation, thus avoiding
  // some N^2 behavior in pathological cases.
  IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
                              InstrsForInstCombineWorklist.size());

  return MadeIRChange;
}

bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
  MadeIRChange = false;

  DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
        << F.getNameStr() << "\n");

  {
    // Do a depth-first traversal of the function, populating the worklist with
    // the reachable instructions.  Ignore blocks that are not reachable.  Keep
    // track of which blocks we visit.
    SmallPtrSet<BasicBlock*, 64> Visited;
    MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);

    // Do a quick scan over the function.  If we find any blocks that are
    // unreachable, remove any instructions inside of them.  This prevents
    // the instcombine code from having to deal with some bad special cases.
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      if (!Visited.count(BB)) {
        Instruction *Term = BB->getTerminator();
        while (Term != BB->begin()) {   // Remove instrs bottom-up
          BasicBlock::iterator I = Term; --I;

          DEBUG(errs() << "IC: DCE: " << *I << '\n');
          // A debug intrinsic shouldn't force another iteration if we weren't
          // going to do one without it.
          if (!isa<DbgInfoIntrinsic>(I)) {
            ++NumDeadInst;
            MadeIRChange = true;
          }

          // If I is not of void type, replace all of its uses with undef.
          // This allows ValueHandles and custom metadata to adjust themselves.
          if (!I->getType()->isVoidTy())
            I->replaceAllUsesWith(UndefValue::get(I->getType()));
          I->eraseFromParent();
        }
      }
  }

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.RemoveOne();
    if (I == 0) continue;  // Skip entries nulled out when instructions died.

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I)) {
      DEBUG(errs() << "IC: DCE: " << *I << '\n');
      EraseInstFromFunction(*I);
      ++NumDeadInst;
      MadeIRChange = true;
      continue;
    }

    // Instruction isn't dead; see if we can constant propagate it.
    if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
      if (Constant *C = ConstantFoldInstruction(I, TD)) {
        DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');

        // Add operands to the worklist.
        ReplaceInstUsesWith(*I, C);
        ++NumConstProp;
        EraseInstFromFunction(*I);
        MadeIRChange = true;
        continue;
      }

    // See if we can trivially sink this instruction to a successor basic block.
    if (I->hasOneUse()) {
      BasicBlock *BB = I->getParent();
      Instruction *UserInst = cast<Instruction>(I->use_back());
      BasicBlock *UserParent;

      // Get the block the use occurs in.
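      // (A PHI's use logically occurs at the end of the corresponding incoming
      // block, not in the block containing the PHI itself.)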
      if (PHINode *PN = dyn_cast<PHINode>(UserInst))
        UserParent = PN->getIncomingBlock(I->use_begin().getUse());
      else
        UserParent = UserInst->getParent();

      if (UserParent != BB) {
        bool UserIsSuccessor = false;
        // See if the user is one of our successors.
        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
          if (*SI == UserParent) {
            UserIsSuccessor = true;
            break;
          }

        // If the user is one of our immediate successors, and if that successor
        // only has us as a predecessor (we'd have to split the critical edge
        // otherwise), we can keep going.
        if (UserIsSuccessor && UserParent->getSinglePredecessor())
          // Okay, the CFG is simple enough; try to sink this instruction.
          MadeIRChange |= TryToSinkInstruction(I, UserParent);
      }
    }

    // Now that we have an instruction, try combining it to simplify it.
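    // Instructions built by the visit methods below are inserted right before
    // I and inherit I's debug location.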
    Builder->SetInsertPoint(I->getParent(), I);
    Builder->SetCurrentDebugLocation(I->getDebugLoc());

#ifndef NDEBUG
    std::string OrigI;
#endif
    DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
    DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');

    if (Instruction *Result = visit(*I)) {
      ++NumCombined;
      // Should we replace the old instruction with a new one?
      if (Result != I) {
        DEBUG(errs() << "IC: Old = " << *I << '\n'
                     << "    New = " << *Result << '\n');

        if (!I->getDebugLoc().isUnknown())
          Result->setDebugLoc(I->getDebugLoc());
        // Everything uses the new instruction now.
        I->replaceAllUsesWith(Result);

        // Push the new instruction and any users onto the worklist.
        Worklist.Add(Result);
        Worklist.AddUsersToWorkList(*Result);

        // Move the name to the new instruction first.
        Result->takeName(I);

        // Insert the new instruction into the basic block...
        BasicBlock *InstParent = I->getParent();
        BasicBlock::iterator InsertPos = I;

        if (!isa<PHINode>(Result))        // If combining a PHI, don't insert it
          while (isa<PHINode>(InsertPos)) // into the middle of a block of PHIs.
            ++InsertPos;

        InstParent->getInstList().insert(InsertPos, Result);

        EraseInstFromFunction(*I);
      } else {
#ifndef NDEBUG
        DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
                     << "    New = " << *I << '\n');
#endif

        // If the instruction was modified, it's possible that it is now dead.
        // If so, remove it.
        if (isInstructionTriviallyDead(I)) {
          EraseInstFromFunction(*I);
        } else {
          Worklist.Add(I);
          Worklist.AddUsersToWorkList(*I);
        }
      }
      MadeIRChange = true;
    }
  }

  Worklist.Zap();
  return MadeIRChange;
}


bool InstCombiner::runOnFunction(Function &F) {
  TD = getAnalysisIfAvailable<TargetData>();

  // Builder - This is an IRBuilder that automatically inserts new
  // instructions into the worklist when they are created.
  IRBuilder<true, TargetFolder, InstCombineIRInserter>
    TheBuilder(F.getContext(), TargetFolder(TD),
               InstCombineIRInserter(Worklist));
  Builder = &TheBuilder;

  bool EverMadeChange = false;

  // Lower dbg.declare intrinsics; otherwise their values may be clobbered by
  // the instcombiner.
  EverMadeChange = LowerDbgDeclare(F);

  // Iterate while there is work to do; DoOneIteration returns true if it
  // changed the IR, so this loop runs until a fixed point is reached.
  unsigned Iteration = 0;
  while (DoOneIteration(F, Iteration++))
    EverMadeChange = true;

  Builder = 0;
  return EverMadeChange;
}

FunctionPass *llvm::createInstructionCombiningPass() {
  return new InstCombiner();
}
