1//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// InstructionCombining - Combine instructions to form fewer, simple
11// instructions.  This pass does not modify the CFG.  This pass is where
12// algebraic simplification happens.
13//
14// This pass combines things like:
15//    %Y = add i32 %X, 1
16//    %Z = add i32 %Y, 1
17// into:
18//    %Z = add i32 %X, 2
19//
20// This is a simple worklist driven algorithm.
21//
22// This pass guarantees that the following canonicalizations are performed on
23// the program:
24//    1. If a binary operator has a constant operand, it is moved to the RHS
25//    2. Bitwise operators with constant operands are always grouped so that
26//       shifts are performed first, then or's, then and's, then xor's.
27//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
28//    4. All cmp instructions on boolean values are replaced with logical ops
29//    5. add X, X is represented as (X*2) => (X << 1)
30//    6. Multiplies with a power-of-two constant argument are transformed into
31//       shifts.
32//   ... etc.
33//
34//===----------------------------------------------------------------------===//
35
36#include "llvm/Transforms/Scalar.h"
37#include "InstCombine.h"
38#include "llvm-c/Initialization.h"
39#include "llvm/ADT/SmallPtrSet.h"
40#include "llvm/ADT/Statistic.h"
41#include "llvm/ADT/StringSwitch.h"
42#include "llvm/Analysis/ConstantFolding.h"
43#include "llvm/Analysis/InstructionSimplify.h"
44#include "llvm/Analysis/MemoryBuiltins.h"
45#include "llvm/Analysis/ValueTracking.h"
46#include "llvm/IR/CFG.h"
47#include "llvm/IR/DataLayout.h"
48#include "llvm/IR/GetElementPtrTypeIterator.h"
49#include "llvm/IR/IntrinsicInst.h"
50#include "llvm/IR/PatternMatch.h"
51#include "llvm/IR/ValueHandle.h"
52#include "llvm/Support/CommandLine.h"
53#include "llvm/Support/Debug.h"
54#include "llvm/Target/TargetLibraryInfo.h"
55#include "llvm/Transforms/Utils/Local.h"
56#include <algorithm>
57#include <climits>
58using namespace llvm;
59using namespace llvm::PatternMatch;
60
61#define DEBUG_TYPE "instcombine"
62
63STATISTIC(NumCombined , "Number of insts combined");
64STATISTIC(NumConstProp, "Number of constant folds");
65STATISTIC(NumDeadInst , "Number of dead inst eliminated");
66STATISTIC(NumSunkInst , "Number of instructions sunk");
67STATISTIC(NumExpand,    "Number of expansions");
68STATISTIC(NumFactor   , "Number of factorizations");
69STATISTIC(NumReassoc  , "Number of reassociations");
70
71static cl::opt<bool> UnsafeFPShrink("enable-double-float-shrink", cl::Hidden,
72                                   cl::init(false),
73                                   cl::desc("Enable unsafe double to float "
74                                            "shrinking for math lib calls"));
75
76// Initialization Routines
77void llvm::initializeInstCombine(PassRegistry &Registry) {
78  initializeInstCombinerPass(Registry);
79}
80
81void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
82  initializeInstCombine(*unwrap(R));
83}
84
85char InstCombiner::ID = 0;
86INITIALIZE_PASS_BEGIN(InstCombiner, "instcombine",
87                "Combine redundant instructions", false, false)
88INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
89INITIALIZE_PASS_END(InstCombiner, "instcombine",
90                "Combine redundant instructions", false, false)
91
92void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
93  AU.setPreservesCFG();
94  AU.addRequired<TargetLibraryInfo>();
95}
96
97
98Value *InstCombiner::EmitGEPOffset(User *GEP) {
99  return llvm::EmitGEPOffset(Builder, *getDataLayout(), GEP);
100}
101
102/// ShouldChangeType - Return true if it is desirable to convert a computation
103/// from 'From' to 'To'.  We don't want to convert from a legal to an illegal
104/// type for example, or from a smaller to a larger illegal type.
105bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
106  assert(From->isIntegerTy() && To->isIntegerTy());
107
108  // If we don't have DL, we don't know if the source/dest are legal.
109  if (!DL) return false;
110
111  unsigned FromWidth = From->getPrimitiveSizeInBits();
112  unsigned ToWidth = To->getPrimitiveSizeInBits();
113  bool FromLegal = DL->isLegalInteger(FromWidth);
114  bool ToLegal = DL->isLegalInteger(ToWidth);
115
116  // If this is a legal integer from type, and the result would be an illegal
117  // type, don't do the transformation.
118  if (FromLegal && !ToLegal)
119    return false;
120
121  // Otherwise, if both are illegal, do not increase the size of the result. We
122  // do allow things like i160 -> i64, but not i64 -> i160.
123  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
124    return false;
125
126  return true;
127}
128
129// Return true, if No Signed Wrap should be maintained for I.
130// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
131// where both B and C should be ConstantInts, results in a constant that does
132// not overflow. This function only handles the Add and Sub opcodes. For
133// all other opcodes, the function conservatively returns false.
134static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
135  OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
136  if (!OBO || !OBO->hasNoSignedWrap()) {
137    return false;
138  }
139
140  // We reason about Add and Sub Only.
141  Instruction::BinaryOps Opcode = I.getOpcode();
142  if (Opcode != Instruction::Add &&
143      Opcode != Instruction::Sub) {
144    return false;
145  }
146
147  ConstantInt *CB = dyn_cast<ConstantInt>(B);
148  ConstantInt *CC = dyn_cast<ConstantInt>(C);
149
150  if (!CB || !CC) {
151    return false;
152  }
153
154  const APInt &BVal = CB->getValue();
155  const APInt &CVal = CC->getValue();
156  bool Overflow = false;
157
158  if (Opcode == Instruction::Add) {
159    BVal.sadd_ov(CVal, Overflow);
160  } else {
161    BVal.ssub_ov(CVal, Overflow);
162  }
163
164  return !Overflow;
165}
166
167/// Conservatively clears subclassOptionalData after a reassociation or
168/// commutation. We preserve fast-math flags when applicable as they can be
169/// preserved.
170static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
171  FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
172  if (!FPMO) {
173    I.clearSubclassOptionalData();
174    return;
175  }
176
177  FastMathFlags FMF = I.getFastMathFlags();
178  I.clearSubclassOptionalData();
179  I.setFastMathFlags(FMF);
180}
181
182/// SimplifyAssociativeOrCommutative - This performs a few simplifications for
183/// operators which are associative or commutative:
184//
185//  Commutative operators:
186//
187//  1. Order operands such that they are listed from right (least complex) to
188//     left (most complex).  This puts constants before unary operators before
189//     binary operators.
190//
191//  Associative operators:
192//
193//  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
194//  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
195//
196//  Associative and commutative operators:
197//
198//  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
199//  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
200//  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
201//     if C1 and C2 are constants.
202//
203bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
204  Instruction::BinaryOps Opcode = I.getOpcode();
205  bool Changed = false;
206
207  do {
208    // Order operands such that they are listed from right (least complex) to
209    // left (most complex).  This puts constants before unary operators before
210    // binary operators.
211    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
212        getComplexity(I.getOperand(1)))
213      Changed = !I.swapOperands();
214
215    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
216    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
217
218    if (I.isAssociative()) {
219      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
220      if (Op0 && Op0->getOpcode() == Opcode) {
221        Value *A = Op0->getOperand(0);
222        Value *B = Op0->getOperand(1);
223        Value *C = I.getOperand(1);
224
225        // Does "B op C" simplify?
226        if (Value *V = SimplifyBinOp(Opcode, B, C, DL)) {
227          // It simplifies to V.  Form "A op V".
228          I.setOperand(0, A);
229          I.setOperand(1, V);
230          // Conservatively clear the optional flags, since they may not be
231          // preserved by the reassociation.
232          if (MaintainNoSignedWrap(I, B, C) &&
233              (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
234            // Note: this is only valid because SimplifyBinOp doesn't look at
235            // the operands to Op0.
236            I.clearSubclassOptionalData();
237            I.setHasNoSignedWrap(true);
238          } else {
239            ClearSubclassDataAfterReassociation(I);
240          }
241
242          Changed = true;
243          ++NumReassoc;
244          continue;
245        }
246      }
247
248      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
249      if (Op1 && Op1->getOpcode() == Opcode) {
250        Value *A = I.getOperand(0);
251        Value *B = Op1->getOperand(0);
252        Value *C = Op1->getOperand(1);
253
254        // Does "A op B" simplify?
255        if (Value *V = SimplifyBinOp(Opcode, A, B, DL)) {
256          // It simplifies to V.  Form "V op C".
257          I.setOperand(0, V);
258          I.setOperand(1, C);
259          // Conservatively clear the optional flags, since they may not be
260          // preserved by the reassociation.
261          ClearSubclassDataAfterReassociation(I);
262          Changed = true;
263          ++NumReassoc;
264          continue;
265        }
266      }
267    }
268
269    if (I.isAssociative() && I.isCommutative()) {
270      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
271      if (Op0 && Op0->getOpcode() == Opcode) {
272        Value *A = Op0->getOperand(0);
273        Value *B = Op0->getOperand(1);
274        Value *C = I.getOperand(1);
275
276        // Does "C op A" simplify?
277        if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
278          // It simplifies to V.  Form "V op B".
279          I.setOperand(0, V);
280          I.setOperand(1, B);
281          // Conservatively clear the optional flags, since they may not be
282          // preserved by the reassociation.
283          ClearSubclassDataAfterReassociation(I);
284          Changed = true;
285          ++NumReassoc;
286          continue;
287        }
288      }
289
290      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
291      if (Op1 && Op1->getOpcode() == Opcode) {
292        Value *A = I.getOperand(0);
293        Value *B = Op1->getOperand(0);
294        Value *C = Op1->getOperand(1);
295
296        // Does "C op A" simplify?
297        if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
298          // It simplifies to V.  Form "B op V".
299          I.setOperand(0, B);
300          I.setOperand(1, V);
301          // Conservatively clear the optional flags, since they may not be
302          // preserved by the reassociation.
303          ClearSubclassDataAfterReassociation(I);
304          Changed = true;
305          ++NumReassoc;
306          continue;
307        }
308      }
309
310      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
311      // if C1 and C2 are constants.
312      if (Op0 && Op1 &&
313          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
314          isa<Constant>(Op0->getOperand(1)) &&
315          isa<Constant>(Op1->getOperand(1)) &&
316          Op0->hasOneUse() && Op1->hasOneUse()) {
317        Value *A = Op0->getOperand(0);
318        Constant *C1 = cast<Constant>(Op0->getOperand(1));
319        Value *B = Op1->getOperand(0);
320        Constant *C2 = cast<Constant>(Op1->getOperand(1));
321
322        Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
323        BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
324        if (isa<FPMathOperator>(New)) {
325          FastMathFlags Flags = I.getFastMathFlags();
326          Flags &= Op0->getFastMathFlags();
327          Flags &= Op1->getFastMathFlags();
328          New->setFastMathFlags(Flags);
329        }
330        InsertNewInstWith(New, I);
331        New->takeName(Op1);
332        I.setOperand(0, New);
333        I.setOperand(1, Folded);
334        // Conservatively clear the optional flags, since they may not be
335        // preserved by the reassociation.
336        ClearSubclassDataAfterReassociation(I);
337
338        Changed = true;
339        continue;
340      }
341    }
342
343    // No further simplifications.
344    return Changed;
345  } while (1);
346}
347
348/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to
349/// "(X LOp Y) ROp (X LOp Z)".
350static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
351                                     Instruction::BinaryOps ROp) {
352  switch (LOp) {
353  default:
354    return false;
355
356  case Instruction::And:
357    // And distributes over Or and Xor.
358    switch (ROp) {
359    default:
360      return false;
361    case Instruction::Or:
362    case Instruction::Xor:
363      return true;
364    }
365
366  case Instruction::Mul:
367    // Multiplication distributes over addition and subtraction.
368    switch (ROp) {
369    default:
370      return false;
371    case Instruction::Add:
372    case Instruction::Sub:
373      return true;
374    }
375
376  case Instruction::Or:
377    // Or distributes over And.
378    switch (ROp) {
379    default:
380      return false;
381    case Instruction::And:
382      return true;
383    }
384  }
385}
386
387/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to
388/// "(X ROp Z) LOp (Y ROp Z)".
389static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
390                                     Instruction::BinaryOps ROp) {
391  if (Instruction::isCommutative(ROp))
392    return LeftDistributesOverRight(ROp, LOp);
393  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
394  // but this requires knowing that the addition does not overflow and other
395  // such subtleties.
396  return false;
397}
398
399/// This function returns identity value for given opcode, which can be used to
400/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
401static Value *getIdentityValue(Instruction::BinaryOps OpCode, Value *V) {
402  if (isa<Constant>(V))
403    return nullptr;
404
405  if (OpCode == Instruction::Mul)
406    return ConstantInt::get(V->getType(), 1);
407
408  // TODO: We can handle other cases e.g. Instruction::And, Instruction::Or etc.
409
410  return nullptr;
411}
412
413/// This function factors binary ops which can be combined using distributive
414/// laws. This also factor SHL as MUL e.g. SHL(X, 2) ==> MUL(X, 4).
415static Instruction::BinaryOps
416getBinOpsForFactorization(BinaryOperator *Op, Value *&LHS, Value *&RHS) {
417  if (!Op)
418    return Instruction::BinaryOpsEnd;
419
420  if (Op->getOpcode() == Instruction::Shl) {
421    if (Constant *CST = dyn_cast<Constant>(Op->getOperand(1))) {
422      // The multiplier is really 1 << CST.
423      RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), CST);
424      LHS = Op->getOperand(0);
425      return Instruction::Mul;
426    }
427  }
428
429  // TODO: We can add other conversions e.g. shr => div etc.
430
431  LHS = Op->getOperand(0);
432  RHS = Op->getOperand(1);
433  return Op->getOpcode();
434}
435
436/// This tries to simplify binary operations by factorizing out common terms
437/// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
438static Value *tryFactorization(InstCombiner::BuilderTy *Builder,
439                               const DataLayout *DL, BinaryOperator &I,
440                               Instruction::BinaryOps InnerOpcode, Value *A,
441                               Value *B, Value *C, Value *D) {
442
443  // If any of A, B, C, D are null, we can not factor I, return early.
444  // Checking A and C should be enough.
445  if (!A || !C || !B || !D)
446    return nullptr;
447
448  Value *SimplifiedInst = nullptr;
449  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
450  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
451
452  // Does "X op' Y" always equal "Y op' X"?
453  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
454
455  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
456  if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
457    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
458    // commutative case, "(A op' B) op (C op' A)"?
459    if (A == C || (InnerCommutative && A == D)) {
460      if (A != C)
461        std::swap(C, D);
462      // Consider forming "A op' (B op D)".
463      // If "B op D" simplifies then it can be formed with no cost.
464      Value *V = SimplifyBinOp(TopLevelOpcode, B, D, DL);
465      // If "B op D" doesn't simplify then only go on if both of the existing
466      // operations "A op' B" and "C op' D" will be zapped as no longer used.
467      if (!V && LHS->hasOneUse() && RHS->hasOneUse())
468        V = Builder->CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
469      if (V) {
470        SimplifiedInst = Builder->CreateBinOp(InnerOpcode, A, V);
471      }
472    }
473
474  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
475  if (!SimplifiedInst && RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
476    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
477    // commutative case, "(A op' B) op (B op' D)"?
478    if (B == D || (InnerCommutative && B == C)) {
479      if (B != D)
480        std::swap(C, D);
481      // Consider forming "(A op C) op' B".
482      // If "A op C" simplifies then it can be formed with no cost.
483      Value *V = SimplifyBinOp(TopLevelOpcode, A, C, DL);
484
485      // If "A op C" doesn't simplify then only go on if both of the existing
486      // operations "A op' B" and "C op' D" will be zapped as no longer used.
487      if (!V && LHS->hasOneUse() && RHS->hasOneUse())
488        V = Builder->CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
489      if (V) {
490        SimplifiedInst = Builder->CreateBinOp(InnerOpcode, V, B);
491      }
492    }
493
494  if (SimplifiedInst) {
495    ++NumFactor;
496    SimplifiedInst->takeName(&I);
497
498    // Check if we can add NSW flag to SimplifiedInst. If so, set NSW flag.
499    // TODO: Check for NUW.
500    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SimplifiedInst)) {
501      if (isa<OverflowingBinaryOperator>(SimplifiedInst)) {
502        bool HasNSW = false;
503        if (isa<OverflowingBinaryOperator>(&I))
504          HasNSW = I.hasNoSignedWrap();
505
506        if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
507          if (isa<OverflowingBinaryOperator>(Op0))
508            HasNSW &= Op0->hasNoSignedWrap();
509
510        if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
511          if (isa<OverflowingBinaryOperator>(Op1))
512            HasNSW &= Op1->hasNoSignedWrap();
513        BO->setHasNoSignedWrap(HasNSW);
514      }
515    }
516  }
517  return SimplifiedInst;
518}
519
520/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
521/// which some other binary operation distributes over either by factorizing
522/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this
523/// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is
524/// a win).  Returns the simplified value, or null if it didn't simplify.
525Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
526  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
527  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
528  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
529
530  // Factorization.
531  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
532  Instruction::BinaryOps LHSOpcode = getBinOpsForFactorization(Op0, A, B);
533  Instruction::BinaryOps RHSOpcode = getBinOpsForFactorization(Op1, C, D);
534
535  // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
536  // a common term.
537  if (LHSOpcode == RHSOpcode) {
538    if (Value *V = tryFactorization(Builder, DL, I, LHSOpcode, A, B, C, D))
539      return V;
540  }
541
542  // The instruction has the form "(A op' B) op (C)".  Try to factorize common
543  // term.
544  if (Value *V = tryFactorization(Builder, DL, I, LHSOpcode, A, B, RHS,
545                                  getIdentityValue(LHSOpcode, RHS)))
546    return V;
547
548  // The instruction has the form "(B) op (C op' D)".  Try to factorize common
549  // term.
550  if (Value *V = tryFactorization(Builder, DL, I, RHSOpcode, LHS,
551                                  getIdentityValue(RHSOpcode, LHS), C, D))
552    return V;
553
554  // Expansion.
555  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
556  if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
557    // The instruction has the form "(A op' B) op C".  See if expanding it out
558    // to "(A op C) op' (B op C)" results in simplifications.
559    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
560    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
561
562    // Do "A op C" and "B op C" both simplify?
563    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, DL))
564      if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, DL)) {
565        // They do! Return "L op' R".
566        ++NumExpand;
567        // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
568        if ((L == A && R == B) ||
569            (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
570          return Op0;
571        // Otherwise return "L op' R" if it simplifies.
572        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
573          return V;
574        // Otherwise, create a new instruction.
575        C = Builder->CreateBinOp(InnerOpcode, L, R);
576        C->takeName(&I);
577        return C;
578      }
579  }
580
581  if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
582    // The instruction has the form "A op (B op' C)".  See if expanding it out
583    // to "(A op B) op' (A op C)" results in simplifications.
584    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
585    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
586
587    // Do "A op B" and "A op C" both simplify?
588    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, DL))
589      if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, DL)) {
590        // They do! Return "L op' R".
591        ++NumExpand;
592        // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
593        if ((L == B && R == C) ||
594            (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
595          return Op1;
596        // Otherwise return "L op' R" if it simplifies.
597        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
598          return V;
599        // Otherwise, create a new instruction.
600        A = Builder->CreateBinOp(InnerOpcode, L, R);
601        A->takeName(&I);
602        return A;
603      }
604  }
605
606  return nullptr;
607}
608
609// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
610// if the LHS is a constant zero (which is the 'negate' form).
611//
612Value *InstCombiner::dyn_castNegVal(Value *V) const {
613  if (BinaryOperator::isNeg(V))
614    return BinaryOperator::getNegArgument(V);
615
616  // Constants can be considered to be negated values if they can be folded.
617  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
618    return ConstantExpr::getNeg(C);
619
620  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
621    if (C->getType()->getElementType()->isIntegerTy())
622      return ConstantExpr::getNeg(C);
623
624  return nullptr;
625}
626
627// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
628// instruction if the LHS is a constant negative zero (which is the 'negate'
629// form).
630//
631Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {
632  if (BinaryOperator::isFNeg(V, IgnoreZeroSign))
633    return BinaryOperator::getFNegArgument(V);
634
635  // Constants can be considered to be negated values if they can be folded.
636  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
637    return ConstantExpr::getFNeg(C);
638
639  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
640    if (C->getType()->getElementType()->isFloatingPointTy())
641      return ConstantExpr::getFNeg(C);
642
643  return nullptr;
644}
645
646static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
647                                             InstCombiner *IC) {
648  if (CastInst *CI = dyn_cast<CastInst>(&I)) {
649    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
650  }
651
652  // Figure out if the constant is the left or the right argument.
653  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
654  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
655
656  if (Constant *SOC = dyn_cast<Constant>(SO)) {
657    if (ConstIsRHS)
658      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
659    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
660  }
661
662  Value *Op0 = SO, *Op1 = ConstOperand;
663  if (!ConstIsRHS)
664    std::swap(Op0, Op1);
665
666  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I)) {
667    Value *RI = IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
668                                    SO->getName()+".op");
669    Instruction *FPInst = dyn_cast<Instruction>(RI);
670    if (FPInst && isa<FPMathOperator>(FPInst))
671      FPInst->copyFastMathFlags(BO);
672    return RI;
673  }
674  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
675    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
676                                   SO->getName()+".cmp");
677  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
678    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
679                                   SO->getName()+".cmp");
680  llvm_unreachable("Unknown binary instruction type!");
681}
682
683// FoldOpIntoSelect - Given an instruction with a select as one operand and a
684// constant as the other operand, try to fold the binary operator into the
685// select arguments.  This also works for Cast instructions, which obviously do
686// not have a second operand.
687Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
688  // Don't modify shared select instructions
689  if (!SI->hasOneUse()) return nullptr;
690  Value *TV = SI->getOperand(1);
691  Value *FV = SI->getOperand(2);
692
693  if (isa<Constant>(TV) || isa<Constant>(FV)) {
694    // Bool selects with constant operands can be folded to logical ops.
695    if (SI->getType()->isIntegerTy(1)) return nullptr;
696
697    // If it's a bitcast involving vectors, make sure it has the same number of
698    // elements on both sides.
699    if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
700      VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
701      VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
702
703      // Verify that either both or neither are vectors.
704      if ((SrcTy == nullptr) != (DestTy == nullptr)) return nullptr;
705      // If vectors, verify that they have the same number of elements.
706      if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
707        return nullptr;
708    }
709
710    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
711    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);
712
713    return SelectInst::Create(SI->getCondition(),
714                              SelectTrueVal, SelectFalseVal);
715  }
716  return nullptr;
717}
718
719
720/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
721/// has a PHI node as operand #0, see if we can fold the instruction into the
722/// PHI (which is only possible if all operands to the PHI are constants).
723///
724Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
725  PHINode *PN = cast<PHINode>(I.getOperand(0));
726  unsigned NumPHIValues = PN->getNumIncomingValues();
727  if (NumPHIValues == 0)
728    return nullptr;
729
730  // We normally only transform phis with a single use.  However, if a PHI has
731  // multiple uses and they are all the same operation, we can fold *all* of the
732  // uses into the PHI.
733  if (!PN->hasOneUse()) {
734    // Walk the use list for the instruction, comparing them to I.
735    for (User *U : PN->users()) {
736      Instruction *UI = cast<Instruction>(U);
737      if (UI != &I && !I.isIdenticalTo(UI))
738        return nullptr;
739    }
740    // Otherwise, we can replace *all* users with the new PHI we form.
741  }
742
743  // Check to see if all of the operands of the PHI are simple constants
744  // (constantint/constantfp/undef).  If there is one non-constant value,
745  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
746  // bail out.  We don't do arbitrary constant expressions here because moving
747  // their computation can be expensive without a cost model.
748  BasicBlock *NonConstBB = nullptr;
749  for (unsigned i = 0; i != NumPHIValues; ++i) {
750    Value *InVal = PN->getIncomingValue(i);
751    if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
752      continue;
753
754    if (isa<PHINode>(InVal)) return nullptr;  // Itself a phi.
755    if (NonConstBB) return nullptr;  // More than one non-const value.
756
757    NonConstBB = PN->getIncomingBlock(i);
758
759    // If the InVal is an invoke at the end of the pred block, then we can't
760    // insert a computation after it without breaking the edge.
761    if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
762      if (II->getParent() == NonConstBB)
763        return nullptr;
764
765    // If the incoming non-constant value is in I's block, we will remove one
766    // instruction, but insert another equivalent one, leading to infinite
767    // instcombine.
768    if (NonConstBB == I.getParent())
769      return nullptr;
770  }
771
772  // If there is exactly one non-constant value, we can insert a copy of the
773  // operation in that block.  However, if this is a critical edge, we would be
774  // inserting the computation one some other paths (e.g. inside a loop).  Only
775  // do this if the pred block is unconditionally branching into the phi block.
776  if (NonConstBB != nullptr) {
777    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
778    if (!BI || !BI->isUnconditional()) return nullptr;
779  }
780
781  // Okay, we can do the transformation: create the new PHI node.
782  PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
783  InsertNewInstBefore(NewPN, *PN);
784  NewPN->takeName(PN);
785
786  // If we are going to have to insert a new computation, do so right before the
787  // predecessors terminator.
788  if (NonConstBB)
789    Builder->SetInsertPoint(NonConstBB->getTerminator());
790
791  // Next, add all of the operands to the PHI.
792  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
793    // We only currently try to fold the condition of a select when it is a phi,
794    // not the true/false values.
795    Value *TrueV = SI->getTrueValue();
796    Value *FalseV = SI->getFalseValue();
797    BasicBlock *PhiTransBB = PN->getParent();
798    for (unsigned i = 0; i != NumPHIValues; ++i) {
799      BasicBlock *ThisBB = PN->getIncomingBlock(i);
800      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
801      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
802      Value *InV = nullptr;
803      // Beware of ConstantExpr:  it may eventually evaluate to getNullValue,
804      // even if currently isNullValue gives false.
805      Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i));
806      if (InC && !isa<ConstantExpr>(InC))
807        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
808      else
809        InV = Builder->CreateSelect(PN->getIncomingValue(i),
810                                    TrueVInPred, FalseVInPred, "phitmp");
811      NewPN->addIncoming(InV, ThisBB);
812    }
813  } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
814    Constant *C = cast<Constant>(I.getOperand(1));
815    for (unsigned i = 0; i != NumPHIValues; ++i) {
816      Value *InV = nullptr;
817      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
818        InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
819      else if (isa<ICmpInst>(CI))
820        InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
821                                  C, "phitmp");
822      else
823        InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
824                                  C, "phitmp");
825      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
826    }
827  } else if (I.getNumOperands() == 2) {
828    Constant *C = cast<Constant>(I.getOperand(1));
829    for (unsigned i = 0; i != NumPHIValues; ++i) {
830      Value *InV = nullptr;
831      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
832        InV = ConstantExpr::get(I.getOpcode(), InC, C);
833      else
834        InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
835                                   PN->getIncomingValue(i), C, "phitmp");
836      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
837    }
838  } else {
839    CastInst *CI = cast<CastInst>(&I);
840    Type *RetTy = CI->getType();
841    for (unsigned i = 0; i != NumPHIValues; ++i) {
842      Value *InV;
843      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
844        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
845      else
846        InV = Builder->CreateCast(CI->getOpcode(),
847                                PN->getIncomingValue(i), I.getType(), "phitmp");
848      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
849    }
850  }
851
852  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
853    Instruction *User = cast<Instruction>(*UI++);
854    if (User == &I) continue;
855    ReplaceInstUsesWith(*User, NewPN);
856    EraseInstFromFunction(*User);
857  }
858  return ReplaceInstUsesWith(I, NewPN);
859}
860
861/// FindElementAtOffset - Given a pointer type and a constant offset, determine
862/// whether or not there is a sequence of GEP indices into the pointed type that
863/// will land us at the specified offset.  If so, fill them into NewIndices and
864/// return the resultant element type, otherwise return null.
865Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
866                                        SmallVectorImpl<Value*> &NewIndices) {
867  assert(PtrTy->isPtrOrPtrVectorTy());
868
869  if (!DL)
870    return nullptr;
871
872  Type *Ty = PtrTy->getPointerElementType();
873  if (!Ty->isSized())
874    return nullptr;
875
876  // Start with the index over the outer type.  Note that the type size
877  // might be zero (even if the offset isn't zero) if the indexed type
878  // is something like [0 x {int, int}]
879  Type *IntPtrTy = DL->getIntPtrType(PtrTy);
880  int64_t FirstIdx = 0;
881  if (int64_t TySize = DL->getTypeAllocSize(Ty)) {
882    FirstIdx = Offset/TySize;
883    Offset -= FirstIdx*TySize;
884
885    // Handle hosts where % returns negative instead of values [0..TySize).
886    if (Offset < 0) {
887      --FirstIdx;
888      Offset += TySize;
889      assert(Offset >= 0);
890    }
891    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
892  }
893
894  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));
895
896  // Index into the types.  If we fail, set OrigBase to null.
897  while (Offset) {
898    // Indexing into tail padding between struct/array elements.
899    if (uint64_t(Offset*8) >= DL->getTypeSizeInBits(Ty))
900      return nullptr;
901
902    if (StructType *STy = dyn_cast<StructType>(Ty)) {
903      const StructLayout *SL = DL->getStructLayout(STy);
904      assert(Offset < (int64_t)SL->getSizeInBytes() &&
905             "Offset must stay within the indexed type");
906
907      unsigned Elt = SL->getElementContainingOffset(Offset);
908      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
909                                            Elt));
910
911      Offset -= SL->getElementOffset(Elt);
912      Ty = STy->getElementType(Elt);
913    } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
914      uint64_t EltSize = DL->getTypeAllocSize(AT->getElementType());
915      assert(EltSize && "Cannot index into a zero-sized array");
916      NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
917      Offset %= EltSize;
918      Ty = AT->getElementType();
919    } else {
920      // Otherwise, we can't index into the middle of this atomic type, bail.
921      return nullptr;
922    }
923  }
924
925  return Ty;
926}
927
928static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
929  // If this GEP has only 0 indices, it is the same pointer as
930  // Src. If Src is not a trivial GEP too, don't combine
931  // the indices.
932  if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
933      !Src.hasOneUse())
934    return false;
935  return true;
936}
937
938/// Descale - Return a value X such that Val = X * Scale, or null if none.  If
939/// the multiplication is known not to overflow then NoSignedWrap is set.
940Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
941  assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");
942  assert(cast<IntegerType>(Val->getType())->getBitWidth() ==
943         Scale.getBitWidth() && "Scale not compatible with value!");
944
945  // If Val is zero or Scale is one then Val = Val * Scale.
946  if (match(Val, m_Zero()) || Scale == 1) {
947    NoSignedWrap = true;
948    return Val;
949  }
950
951  // If Scale is zero then it does not divide Val.
952  if (Scale.isMinValue())
953    return nullptr;
954
955  // Look through chains of multiplications, searching for a constant that is
956  // divisible by Scale.  For example, descaling X*(Y*(Z*4)) by a factor of 4
957  // will find the constant factor 4 and produce X*(Y*Z).  Descaling X*(Y*8) by
958  // a factor of 4 will produce X*(Y*2).  The principle of operation is to bore
959  // down from Val:
960  //
961  //     Val = M1 * X          ||   Analysis starts here and works down
962  //      M1 = M2 * Y          ||   Doesn't descend into terms with more
963  //      M2 =  Z * 4          \/   than one use
964  //
965  // Then to modify a term at the bottom:
966  //
967  //     Val = M1 * X
968  //      M1 =  Z * Y          ||   Replaced M2 with Z
969  //
970  // Then to work back up correcting nsw flags.
971
972  // Op - the term we are currently analyzing.  Starts at Val then drills down.
973  // Replaced with its descaled value before exiting from the drill down loop.
974  Value *Op = Val;
975
976  // Parent - initially null, but after drilling down notes where Op came from.
977  // In the example above, Parent is (Val, 0) when Op is M1, because M1 is the
978  // 0'th operand of Val.
979  std::pair<Instruction*, unsigned> Parent;
980
981  // RequireNoSignedWrap - Set if the transform requires a descaling at deeper
982  // levels that doesn't overflow.
983  bool RequireNoSignedWrap = false;
984
985  // logScale - log base 2 of the scale.  Negative if not a power of 2.
986  int32_t logScale = Scale.exactLogBase2();
987
988  for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down
989
990    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
991      // If Op is a constant divisible by Scale then descale to the quotient.
992      APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth.
993      APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
994      if (!Remainder.isMinValue())
995        // Not divisible by Scale.
996        return nullptr;
997      // Replace with the quotient in the parent.
998      Op = ConstantInt::get(CI->getType(), Quotient);
999      NoSignedWrap = true;
1000      break;
1001    }
1002
1003    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) {
1004
1005      if (BO->getOpcode() == Instruction::Mul) {
1006        // Multiplication.
1007        NoSignedWrap = BO->hasNoSignedWrap();
1008        if (RequireNoSignedWrap && !NoSignedWrap)
1009          return nullptr;
1010
1011        // There are three cases for multiplication: multiplication by exactly
1012        // the scale, multiplication by a constant different to the scale, and
1013        // multiplication by something else.
1014        Value *LHS = BO->getOperand(0);
1015        Value *RHS = BO->getOperand(1);
1016
1017        if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
1018          // Multiplication by a constant.
1019          if (CI->getValue() == Scale) {
1020            // Multiplication by exactly the scale, replace the multiplication
1021            // by its left-hand side in the parent.
1022            Op = LHS;
1023            break;
1024          }
1025
1026          // Otherwise drill down into the constant.
1027          if (!Op->hasOneUse())
1028            return nullptr;
1029
1030          Parent = std::make_pair(BO, 1);
1031          continue;
1032        }
1033
1034        // Multiplication by something else. Drill down into the left-hand side
1035        // since that's where the reassociate pass puts the good stuff.
1036        if (!Op->hasOneUse())
1037          return nullptr;
1038
1039        Parent = std::make_pair(BO, 0);
1040        continue;
1041      }
1042
1043      if (logScale > 0 && BO->getOpcode() == Instruction::Shl &&
1044          isa<ConstantInt>(BO->getOperand(1))) {
1045        // Multiplication by a power of 2.
1046        NoSignedWrap = BO->hasNoSignedWrap();
1047        if (RequireNoSignedWrap && !NoSignedWrap)
1048          return nullptr;
1049
1050        Value *LHS = BO->getOperand(0);
1051        int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
1052          getLimitedValue(Scale.getBitWidth());
1053        // Op = LHS << Amt.
1054
1055        if (Amt == logScale) {
1056          // Multiplication by exactly the scale, replace the multiplication
1057          // by its left-hand side in the parent.
1058          Op = LHS;
1059          break;
1060        }
1061        if (Amt < logScale || !Op->hasOneUse())
1062          return nullptr;
1063
1064        // Multiplication by more than the scale.  Reduce the multiplying amount
1065        // by the scale in the parent.
1066        Parent = std::make_pair(BO, 1);
1067        Op = ConstantInt::get(BO->getType(), Amt - logScale);
1068        break;
1069      }
1070    }
1071
1072    if (!Op->hasOneUse())
1073      return nullptr;
1074
1075    if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
1076      if (Cast->getOpcode() == Instruction::SExt) {
1077        // Op is sign-extended from a smaller type, descale in the smaller type.
1078        unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
1079        APInt SmallScale = Scale.trunc(SmallSize);
1080        // Suppose Op = sext X, and we descale X as Y * SmallScale.  We want to
1081        // descale Op as (sext Y) * Scale.  In order to have
1082        //   sext (Y * SmallScale) = (sext Y) * Scale
1083        // some conditions need to hold however: SmallScale must sign-extend to
1084        // Scale and the multiplication Y * SmallScale should not overflow.
1085        if (SmallScale.sext(Scale.getBitWidth()) != Scale)
1086          // SmallScale does not sign-extend to Scale.
1087          return nullptr;
1088        assert(SmallScale.exactLogBase2() == logScale);
1089        // Require that Y * SmallScale must not overflow.
1090        RequireNoSignedWrap = true;
1091
1092        // Drill down through the cast.
1093        Parent = std::make_pair(Cast, 0);
1094        Scale = SmallScale;
1095        continue;
1096      }
1097
1098      if (Cast->getOpcode() == Instruction::Trunc) {
1099        // Op is truncated from a larger type, descale in the larger type.
1100        // Suppose Op = trunc X, and we descale X as Y * sext Scale.  Then
1101        //   trunc (Y * sext Scale) = (trunc Y) * Scale
1102        // always holds.  However (trunc Y) * Scale may overflow even if
1103        // trunc (Y * sext Scale) does not, so nsw flags need to be cleared
1104        // from this point up in the expression (see later).
1105        if (RequireNoSignedWrap)
1106          return nullptr;
1107
1108        // Drill down through the cast.
1109        unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
1110        Parent = std::make_pair(Cast, 0);
1111        Scale = Scale.sext(LargeSize);
1112        if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits())
1113          logScale = -1;
1114        assert(Scale.exactLogBase2() == logScale);
1115        continue;
1116      }
1117    }
1118
1119    // Unsupported expression, bail out.
1120    return nullptr;
1121  }
1122
1123  // If Op is zero then Val = Op * Scale.
1124  if (match(Op, m_Zero())) {
1125    NoSignedWrap = true;
1126    return Op;
1127  }
1128
1129  // We know that we can successfully descale, so from here on we can safely
1130  // modify the IR.  Op holds the descaled version of the deepest term in the
1131  // expression.  NoSignedWrap is 'true' if multiplying Op by Scale is known
1132  // not to overflow.
1133
1134  if (!Parent.first)
1135    // The expression only had one term.
1136    return Op;
1137
1138  // Rewrite the parent using the descaled version of its operand.
1139  assert(Parent.first->hasOneUse() && "Drilled down when more than one use!");
1140  assert(Op != Parent.first->getOperand(Parent.second) &&
1141         "Descaling was a no-op?");
1142  Parent.first->setOperand(Parent.second, Op);
1143  Worklist.Add(Parent.first);
1144
1145  // Now work back up the expression correcting nsw flags.  The logic is based
1146  // on the following observation: if X * Y is known not to overflow as a signed
1147  // multiplication, and Y is replaced by a value Z with smaller absolute value,
1148  // then X * Z will not overflow as a signed multiplication either.  As we work
1149  // our way up, having NoSignedWrap 'true' means that the descaled value at the
1150  // current level has strictly smaller absolute value than the original.
1151  Instruction *Ancestor = Parent.first;
1152  do {
1153    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Ancestor)) {
1154      // If the multiplication wasn't nsw then we can't say anything about the
1155      // value of the descaled multiplication, and we have to clear nsw flags
1156      // from this point on up.
1157      bool OpNoSignedWrap = BO->hasNoSignedWrap();
1158      NoSignedWrap &= OpNoSignedWrap;
1159      if (NoSignedWrap != OpNoSignedWrap) {
1160        BO->setHasNoSignedWrap(NoSignedWrap);
1161        Worklist.Add(Ancestor);
1162      }
1163    } else if (Ancestor->getOpcode() == Instruction::Trunc) {
1164      // The fact that the descaled input to the trunc has smaller absolute
1165      // value than the original input doesn't tell us anything useful about
1166      // the absolute values of the truncations.
1167      NoSignedWrap = false;
1168    }
1169    assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) &&
1170           "Failed to keep proper track of nsw flags while drilling down?");
1171
1172    if (Ancestor == Val)
1173      // Got to the top, all done!
1174      return Val;
1175
1176    // Move up one level in the expression.
1177    assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
1178    Ancestor = Ancestor->user_back();
1179  } while (1);
1180}
1181
1182/// \brief Creates node of binary operation with the same attributes as the
1183/// specified one but with other operands.
1184static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS,
1185                                 InstCombiner::BuilderTy *B) {
1186  Value *BORes = B->CreateBinOp(Inst.getOpcode(), LHS, RHS);
1187  if (BinaryOperator *NewBO = dyn_cast<BinaryOperator>(BORes)) {
1188    if (isa<OverflowingBinaryOperator>(NewBO)) {
1189      NewBO->setHasNoSignedWrap(Inst.hasNoSignedWrap());
1190      NewBO->setHasNoUnsignedWrap(Inst.hasNoUnsignedWrap());
1191    }
1192    if (isa<PossiblyExactOperator>(NewBO))
1193      NewBO->setIsExact(Inst.isExact());
1194  }
1195  return BORes;
1196}
1197
1198/// \brief Makes transformation of binary operation specific for vector types.
1199/// \param Inst Binary operator to transform.
1200/// \return Pointer to node that must replace the original binary operator, or
1201///         null pointer if no transformation was made.
1202Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
1203  if (!Inst.getType()->isVectorTy()) return nullptr;
1204
1205  // It may not be safe to reorder shuffles and things like div, urem, etc.
1206  // because we may trap when executing those ops on unknown vector elements.
1207  // See PR20059.
1208  if (!isSafeToSpeculativelyExecute(&Inst, DL)) return nullptr;
1209
1210  unsigned VWidth = cast<VectorType>(Inst.getType())->getNumElements();
1211  Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
1212  assert(cast<VectorType>(LHS->getType())->getNumElements() == VWidth);
1213  assert(cast<VectorType>(RHS->getType())->getNumElements() == VWidth);
1214
1215  // If both arguments of binary operation are shuffles, which use the same
1216  // mask and shuffle within a single vector, it is worthwhile to move the
1217  // shuffle after binary operation:
1218  //   Op(shuffle(v1, m), shuffle(v2, m)) -> shuffle(Op(v1, v2), m)
1219  if (isa<ShuffleVectorInst>(LHS) && isa<ShuffleVectorInst>(RHS)) {
1220    ShuffleVectorInst *LShuf = cast<ShuffleVectorInst>(LHS);
1221    ShuffleVectorInst *RShuf = cast<ShuffleVectorInst>(RHS);
1222    if (isa<UndefValue>(LShuf->getOperand(1)) &&
1223        isa<UndefValue>(RShuf->getOperand(1)) &&
1224        LShuf->getOperand(0)->getType() == RShuf->getOperand(0)->getType() &&
1225        LShuf->getMask() == RShuf->getMask()) {
1226      Value *NewBO = CreateBinOpAsGiven(Inst, LShuf->getOperand(0),
1227          RShuf->getOperand(0), Builder);
1228      Value *Res = Builder->CreateShuffleVector(NewBO,
1229          UndefValue::get(NewBO->getType()), LShuf->getMask());
1230      return Res;
1231    }
1232  }
1233
1234  // If one argument is a shuffle within one vector, the other is a constant,
1235  // try moving the shuffle after the binary operation.
1236  ShuffleVectorInst *Shuffle = nullptr;
1237  Constant *C1 = nullptr;
1238  if (isa<ShuffleVectorInst>(LHS)) Shuffle = cast<ShuffleVectorInst>(LHS);
1239  if (isa<ShuffleVectorInst>(RHS)) Shuffle = cast<ShuffleVectorInst>(RHS);
1240  if (isa<Constant>(LHS)) C1 = cast<Constant>(LHS);
1241  if (isa<Constant>(RHS)) C1 = cast<Constant>(RHS);
1242  if (Shuffle && C1 &&
1243      (isa<ConstantVector>(C1) || isa<ConstantDataVector>(C1)) &&
1244      isa<UndefValue>(Shuffle->getOperand(1)) &&
1245      Shuffle->getType() == Shuffle->getOperand(0)->getType()) {
1246    SmallVector<int, 16> ShMask = Shuffle->getShuffleMask();
1247    // Find constant C2 that has property:
1248    //   shuffle(C2, ShMask) = C1
1249    // If such constant does not exist (example: ShMask=<0,0> and C1=<1,2>)
1250    // reorder is not possible.
1251    SmallVector<Constant*, 16> C2M(VWidth,
1252                               UndefValue::get(C1->getType()->getScalarType()));
1253    bool MayChange = true;
1254    for (unsigned I = 0; I < VWidth; ++I) {
1255      if (ShMask[I] >= 0) {
1256        assert(ShMask[I] < (int)VWidth);
1257        if (!isa<UndefValue>(C2M[ShMask[I]])) {
1258          MayChange = false;
1259          break;
1260        }
1261        C2M[ShMask[I]] = C1->getAggregateElement(I);
1262      }
1263    }
1264    if (MayChange) {
1265      Constant *C2 = ConstantVector::get(C2M);
1266      Value *NewLHS, *NewRHS;
1267      if (isa<Constant>(LHS)) {
1268        NewLHS = C2;
1269        NewRHS = Shuffle->getOperand(0);
1270      } else {
1271        NewLHS = Shuffle->getOperand(0);
1272        NewRHS = C2;
1273      }
1274      Value *NewBO = CreateBinOpAsGiven(Inst, NewLHS, NewRHS, Builder);
1275      Value *Res = Builder->CreateShuffleVector(NewBO,
1276          UndefValue::get(Inst.getType()), Shuffle->getMask());
1277      return Res;
1278    }
1279  }
1280
1281  return nullptr;
1282}
1283
1284Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
1285  SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
1286
1287  if (Value *V = SimplifyGEPInst(Ops, DL))
1288    return ReplaceInstUsesWith(GEP, V);
1289
1290  Value *PtrOp = GEP.getOperand(0);
1291
1292  // Eliminate unneeded casts for indices, and replace indices which displace
1293  // by multiples of a zero size type with zero.
1294  if (DL) {
1295    bool MadeChange = false;
1296    Type *IntPtrTy = DL->getIntPtrType(GEP.getPointerOperandType());
1297
1298    gep_type_iterator GTI = gep_type_begin(GEP);
1299    for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
1300         I != E; ++I, ++GTI) {
1301      // Skip indices into struct types.
1302      SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
1303      if (!SeqTy) continue;
1304
1305      // If the element type has zero size then any index over it is equivalent
1306      // to an index of zero, so replace it with zero if it is not zero already.
1307      if (SeqTy->getElementType()->isSized() &&
1308          DL->getTypeAllocSize(SeqTy->getElementType()) == 0)
1309        if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
1310          *I = Constant::getNullValue(IntPtrTy);
1311          MadeChange = true;
1312        }
1313
1314      Type *IndexTy = (*I)->getType();
1315      if (IndexTy != IntPtrTy) {
1316        // If we are using a wider index than needed for this platform, shrink
1317        // it to what we need.  If narrower, sign-extend it to what we need.
1318        // This explicit cast can make subsequent optimizations more obvious.
1319        *I = Builder->CreateIntCast(*I, IntPtrTy, true);
1320        MadeChange = true;
1321      }
1322    }
1323    if (MadeChange) return &GEP;
1324  }
1325
1326  // Check to see if the inputs to the PHI node are getelementptr instructions.
1327  if (PHINode *PN = dyn_cast<PHINode>(PtrOp)) {
1328    GetElementPtrInst *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
1329    if (!Op1)
1330      return nullptr;
1331
1332    signed DI = -1;
1333
1334    for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
1335      GetElementPtrInst *Op2 = dyn_cast<GetElementPtrInst>(*I);
1336      if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands())
1337        return nullptr;
1338
1339      // Keep track of the type as we walk the GEP.
1340      Type *CurTy = Op1->getOperand(0)->getType()->getScalarType();
1341
1342      for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
1343        if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
1344          return nullptr;
1345
1346        if (Op1->getOperand(J) != Op2->getOperand(J)) {
1347          if (DI == -1) {
1348            // We have not seen any differences yet in the GEPs feeding the
1349            // PHI yet, so we record this one if it is allowed to be a
1350            // variable.
1351
1352            // The first two arguments can vary for any GEP, the rest have to be
1353            // static for struct slots
1354            if (J > 1 && CurTy->isStructTy())
1355              return nullptr;
1356
1357            DI = J;
1358          } else {
1359            // The GEP is different by more than one input. While this could be
1360            // extended to support GEPs that vary by more than one variable it
1361            // doesn't make sense since it greatly increases the complexity and
1362            // would result in an R+R+R addressing mode which no backend
1363            // directly supports and would need to be broken into several
1364            // simpler instructions anyway.
1365            return nullptr;
1366          }
1367        }
1368
1369        // Sink down a layer of the type for the next iteration.
1370        if (J > 0) {
1371          if (CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
1372            CurTy = CT->getTypeAtIndex(Op1->getOperand(J));
1373          } else {
1374            CurTy = nullptr;
1375          }
1376        }
1377      }
1378    }
1379
1380    GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(Op1->clone());
1381
1382    if (DI == -1) {
1383      // All the GEPs feeding the PHI are identical. Clone one down into our
1384      // BB so that it can be merged with the current GEP.
1385      GEP.getParent()->getInstList().insert(GEP.getParent()->getFirstNonPHI(),
1386                                            NewGEP);
1387    } else {
1388      // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
1389      // into the current block so it can be merged, and create a new PHI to
1390      // set that index.
1391      Instruction *InsertPt = Builder->GetInsertPoint();
1392      Builder->SetInsertPoint(PN);
1393      PHINode *NewPN = Builder->CreatePHI(Op1->getOperand(DI)->getType(),
1394                                          PN->getNumOperands());
1395      Builder->SetInsertPoint(InsertPt);
1396
1397      for (auto &I : PN->operands())
1398        NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
1399                           PN->getIncomingBlock(I));
1400
1401      NewGEP->setOperand(DI, NewPN);
1402      GEP.getParent()->getInstList().insert(GEP.getParent()->getFirstNonPHI(),
1403                                            NewGEP);
1404      NewGEP->setOperand(DI, NewPN);
1405    }
1406
1407    GEP.setOperand(0, NewGEP);
1408    PtrOp = NewGEP;
1409  }
1410
1411  // Combine Indices - If the source pointer to this getelementptr instruction
1412  // is a getelementptr instruction, combine the indices of the two
1413  // getelementptr instructions into a single instruction.
1414  //
1415  if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
1416    if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
1417      return nullptr;
1418
1419    // Note that if our source is a gep chain itself then we wait for that
1420    // chain to be resolved before we perform this transformation.  This
1421    // avoids us creating a TON of code in some cases.
1422    if (GEPOperator *SrcGEP =
1423          dyn_cast<GEPOperator>(Src->getOperand(0)))
1424      if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
1425        return nullptr;   // Wait until our source is folded to completion.
1426
1427    SmallVector<Value*, 8> Indices;
1428
1429    // Find out whether the last index in the source GEP is a sequential idx.
1430    bool EndsWithSequential = false;
1431    for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
1432         I != E; ++I)
1433      EndsWithSequential = !(*I)->isStructTy();
1434
1435    // Can we combine the two pointer arithmetics offsets?
1436    if (EndsWithSequential) {
1437      // Replace: gep (gep %P, long B), long A, ...
1438      // With:    T = long A+B; gep %P, T, ...
1439      //
1440      Value *Sum;
1441      Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
1442      Value *GO1 = GEP.getOperand(1);
1443      if (SO1 == Constant::getNullValue(SO1->getType())) {
1444        Sum = GO1;
1445      } else if (GO1 == Constant::getNullValue(GO1->getType())) {
1446        Sum = SO1;
1447      } else {
1448        // If they aren't the same type, then the input hasn't been processed
1449        // by the loop above yet (which canonicalizes sequential index types to
1450        // intptr_t).  Just avoid transforming this until the input has been
1451        // normalized.
1452        if (SO1->getType() != GO1->getType())
1453          return nullptr;
1454        Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
1455      }
1456
1457      // Update the GEP in place if possible.
1458      if (Src->getNumOperands() == 2) {
1459        GEP.setOperand(0, Src->getOperand(0));
1460        GEP.setOperand(1, Sum);
1461        return &GEP;
1462      }
1463      Indices.append(Src->op_begin()+1, Src->op_end()-1);
1464      Indices.push_back(Sum);
1465      Indices.append(GEP.op_begin()+2, GEP.op_end());
1466    } else if (isa<Constant>(*GEP.idx_begin()) &&
1467               cast<Constant>(*GEP.idx_begin())->isNullValue() &&
1468               Src->getNumOperands() != 1) {
1469      // Otherwise we can do the fold if the first index of the GEP is a zero
1470      Indices.append(Src->op_begin()+1, Src->op_end());
1471      Indices.append(GEP.idx_begin()+1, GEP.idx_end());
1472    }
1473
1474    if (!Indices.empty())
1475      return (GEP.isInBounds() && Src->isInBounds()) ?
1476        GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices,
1477                                          GEP.getName()) :
1478        GetElementPtrInst::Create(Src->getOperand(0), Indices, GEP.getName());
1479  }
1480
1481  // Canonicalize (gep i8* X, -(ptrtoint Y)) to (sub (ptrtoint X), (ptrtoint Y))
1482  // The GEP pattern is emitted by the SCEV expander for certain kinds of
1483  // pointer arithmetic.
1484  if (DL && GEP.getNumIndices() == 1 &&
1485      match(GEP.getOperand(1), m_Neg(m_PtrToInt(m_Value())))) {
1486    unsigned AS = GEP.getPointerAddressSpace();
1487    if (GEP.getType() == Builder->getInt8PtrTy(AS) &&
1488        GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
1489        DL->getPointerSizeInBits(AS)) {
1490      Operator *Index = cast<Operator>(GEP.getOperand(1));
1491      Value *PtrToInt = Builder->CreatePtrToInt(PtrOp, Index->getType());
1492      Value *NewSub = Builder->CreateSub(PtrToInt, Index->getOperand(1));
1493      return CastInst::Create(Instruction::IntToPtr, NewSub, GEP.getType());
1494    }
1495  }
1496
1497  // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
1498  Value *StrippedPtr = PtrOp->stripPointerCasts();
1499  PointerType *StrippedPtrTy = dyn_cast<PointerType>(StrippedPtr->getType());
1500
1501  // We do not handle pointer-vector geps here.
1502  if (!StrippedPtrTy)
1503    return nullptr;
1504
1505  if (StrippedPtr != PtrOp) {
1506    bool HasZeroPointerIndex = false;
1507    if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
1508      HasZeroPointerIndex = C->isZero();
1509
1510    // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
1511    // into     : GEP [10 x i8]* X, i32 0, ...
1512    //
1513    // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
1514    //           into     : GEP i8* X, ...
1515    //
1516    // This occurs when the program declares an array extern like "int X[];"
1517    if (HasZeroPointerIndex) {
1518      PointerType *CPTy = cast<PointerType>(PtrOp->getType());
1519      if (ArrayType *CATy =
1520          dyn_cast<ArrayType>(CPTy->getElementType())) {
1521        // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
1522        if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
1523          // -> GEP i8* X, ...
1524          SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
1525          GetElementPtrInst *Res =
1526            GetElementPtrInst::Create(StrippedPtr, Idx, GEP.getName());
1527          Res->setIsInBounds(GEP.isInBounds());
1528          if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace())
1529            return Res;
1530          // Insert Res, and create an addrspacecast.
1531          // e.g.,
1532          // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ...
1533          // ->
1534          // %0 = GEP i8 addrspace(1)* X, ...
1535          // addrspacecast i8 addrspace(1)* %0 to i8*
1536          return new AddrSpaceCastInst(Builder->Insert(Res), GEP.getType());
1537        }
1538
1539        if (ArrayType *XATy =
1540              dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
1541          // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
1542          if (CATy->getElementType() == XATy->getElementType()) {
1543            // -> GEP [10 x i8]* X, i32 0, ...
1544            // At this point, we know that the cast source type is a pointer
1545            // to an array of the same type as the destination pointer
1546            // array.  Because the array type is never stepped over (there
1547            // is a leading zero) we can fold the cast into this GEP.
1548            if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) {
1549              GEP.setOperand(0, StrippedPtr);
1550              return &GEP;
1551            }
1552            // Cannot replace the base pointer directly because StrippedPtr's
1553            // address space is different. Instead, create a new GEP followed by
1554            // an addrspacecast.
1555            // e.g.,
1556            // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*),
1557            //   i32 0, ...
1558            // ->
1559            // %0 = GEP [10 x i8] addrspace(1)* X, ...
1560            // addrspacecast i8 addrspace(1)* %0 to i8*
1561            SmallVector<Value*, 8> Idx(GEP.idx_begin(), GEP.idx_end());
1562            Value *NewGEP = GEP.isInBounds() ?
1563              Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
1564              Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
1565            return new AddrSpaceCastInst(NewGEP, GEP.getType());
1566          }
1567        }
1568      }
1569    } else if (GEP.getNumOperands() == 2) {
1570      // Transform things like:
1571      // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
1572      // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
1573      Type *SrcElTy = StrippedPtrTy->getElementType();
1574      Type *ResElTy = PtrOp->getType()->getPointerElementType();
1575      if (DL && SrcElTy->isArrayTy() &&
1576          DL->getTypeAllocSize(SrcElTy->getArrayElementType()) ==
1577          DL->getTypeAllocSize(ResElTy)) {
1578        Type *IdxType = DL->getIntPtrType(GEP.getType());
1579        Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
1580        Value *NewGEP = GEP.isInBounds() ?
1581          Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
1582          Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
1583
1584        // V and GEP are both pointer types --> BitCast
1585        if (StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace())
1586          return new BitCastInst(NewGEP, GEP.getType());
1587        return new AddrSpaceCastInst(NewGEP, GEP.getType());
1588      }
1589
1590      // Transform things like:
1591      // %V = mul i64 %N, 4
1592      // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
1593      // into:  %t1 = getelementptr i32* %arr, i32 %N; bitcast
1594      if (DL && ResElTy->isSized() && SrcElTy->isSized()) {
1595        // Check that changing the type amounts to dividing the index by a scale
1596        // factor.
1597        uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
1598        uint64_t SrcSize = DL->getTypeAllocSize(SrcElTy);
1599        if (ResSize && SrcSize % ResSize == 0) {
1600          Value *Idx = GEP.getOperand(1);
1601          unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
1602          uint64_t Scale = SrcSize / ResSize;
1603
1604          // Earlier transforms ensure that the index has type IntPtrType, which
1605          // considerably simplifies the logic by eliminating implicit casts.
1606          assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
1607                 "Index not cast to pointer width?");
1608
1609          bool NSW;
1610          if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
1611            // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
1612            // If the multiplication NewIdx * Scale may overflow then the new
1613            // GEP may not be "inbounds".
1614            Value *NewGEP = GEP.isInBounds() && NSW ?
1615              Builder->CreateInBoundsGEP(StrippedPtr, NewIdx, GEP.getName()) :
1616              Builder->CreateGEP(StrippedPtr, NewIdx, GEP.getName());
1617
1618            // The NewGEP must be pointer typed, so must the old one -> BitCast
1619            if (StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace())
1620              return new BitCastInst(NewGEP, GEP.getType());
1621            return new AddrSpaceCastInst(NewGEP, GEP.getType());
1622          }
1623        }
1624      }
1625
1626      // Similarly, transform things like:
1627      // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
1628      //   (where tmp = 8*tmp2) into:
1629      // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
1630      if (DL && ResElTy->isSized() && SrcElTy->isSized() &&
1631          SrcElTy->isArrayTy()) {
1632        // Check that changing to the array element type amounts to dividing the
1633        // index by a scale factor.
1634        uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
1635        uint64_t ArrayEltSize
1636          = DL->getTypeAllocSize(SrcElTy->getArrayElementType());
1637        if (ResSize && ArrayEltSize % ResSize == 0) {
1638          Value *Idx = GEP.getOperand(1);
1639          unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
1640          uint64_t Scale = ArrayEltSize / ResSize;
1641
1642          // Earlier transforms ensure that the index has type IntPtrType, which
1643          // considerably simplifies the logic by eliminating implicit casts.
1644          assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
1645                 "Index not cast to pointer width?");
1646
1647          bool NSW;
1648          if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
1649            // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
1650            // If the multiplication NewIdx * Scale may overflow then the new
1651            // GEP may not be "inbounds".
1652            Value *Off[2] = {
1653              Constant::getNullValue(DL->getIntPtrType(GEP.getType())),
1654              NewIdx
1655            };
1656
1657            Value *NewGEP = GEP.isInBounds() && NSW ?
1658              Builder->CreateInBoundsGEP(StrippedPtr, Off, GEP.getName()) :
1659              Builder->CreateGEP(StrippedPtr, Off, GEP.getName());
1660            // The NewGEP must be pointer typed, so must the old one -> BitCast
1661            if (StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace())
1662              return new BitCastInst(NewGEP, GEP.getType());
1663            return new AddrSpaceCastInst(NewGEP, GEP.getType());
1664          }
1665        }
1666      }
1667    }
1668  }
1669
1670  if (!DL)
1671    return nullptr;
1672
1673  /// See if we can simplify:
1674  ///   X = bitcast A* to B*
1675  ///   Y = gep X, <...constant indices...>
1676  /// into a gep of the original struct.  This is important for SROA and alias
1677  /// analysis of unions.  If "A" is also a bitcast, wait for A/X to be merged.
1678  if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
1679    Value *Operand = BCI->getOperand(0);
1680    PointerType *OpType = cast<PointerType>(Operand->getType());
1681    unsigned OffsetBits = DL->getPointerTypeSizeInBits(OpType);
1682    APInt Offset(OffsetBits, 0);
1683    if (!isa<BitCastInst>(Operand) &&
1684        GEP.accumulateConstantOffset(*DL, Offset) &&
1685        StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {
1686
1687      // If this GEP instruction doesn't move the pointer, just replace the GEP
1688      // with a bitcast of the real input to the dest type.
1689      if (!Offset) {
1690        // If the bitcast is of an allocation, and the allocation will be
1691        // converted to match the type of the cast, don't touch this.
1692        if (isa<AllocaInst>(Operand) || isAllocationFn(Operand, TLI)) {
1693          // See if the bitcast simplifies, if so, don't nuke this GEP yet.
1694          if (Instruction *I = visitBitCast(*BCI)) {
1695            if (I != BCI) {
1696              I->takeName(BCI);
1697              BCI->getParent()->getInstList().insert(BCI, I);
1698              ReplaceInstUsesWith(*BCI, I);
1699            }
1700            return &GEP;
1701          }
1702        }
1703        return new BitCastInst(Operand, GEP.getType());
1704      }
1705
1706      // Otherwise, if the offset is non-zero, we need to find out if there is a
1707      // field at Offset in 'A's type.  If so, we can pull the cast through the
1708      // GEP.
1709      SmallVector<Value*, 8> NewIndices;
1710      if (FindElementAtOffset(OpType, Offset.getSExtValue(), NewIndices)) {
1711        Value *NGEP = GEP.isInBounds() ?
1712          Builder->CreateInBoundsGEP(Operand, NewIndices) :
1713          Builder->CreateGEP(Operand, NewIndices);
1714
1715        if (NGEP->getType() == GEP.getType())
1716          return ReplaceInstUsesWith(GEP, NGEP);
1717        NGEP->takeName(&GEP);
1718        return new BitCastInst(NGEP, GEP.getType());
1719      }
1720    }
1721  }
1722
1723  return nullptr;
1724}
1725
1726static bool
1727isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users,
1728                     const TargetLibraryInfo *TLI) {
1729  SmallVector<Instruction*, 4> Worklist;
1730  Worklist.push_back(AI);
1731
1732  do {
1733    Instruction *PI = Worklist.pop_back_val();
1734    for (User *U : PI->users()) {
1735      Instruction *I = cast<Instruction>(U);
1736      switch (I->getOpcode()) {
1737      default:
1738        // Give up the moment we see something we can't handle.
1739        return false;
1740
1741      case Instruction::BitCast:
1742      case Instruction::GetElementPtr:
1743        Users.push_back(I);
1744        Worklist.push_back(I);
1745        continue;
1746
1747      case Instruction::ICmp: {
1748        ICmpInst *ICI = cast<ICmpInst>(I);
1749        // We can fold eq/ne comparisons with null to false/true, respectively.
1750        if (!ICI->isEquality() || !isa<ConstantPointerNull>(ICI->getOperand(1)))
1751          return false;
1752        Users.push_back(I);
1753        continue;
1754      }
1755
1756      case Instruction::Call:
1757        // Ignore no-op and store intrinsics.
1758        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1759          switch (II->getIntrinsicID()) {
1760          default:
1761            return false;
1762
1763          case Intrinsic::memmove:
1764          case Intrinsic::memcpy:
1765          case Intrinsic::memset: {
1766            MemIntrinsic *MI = cast<MemIntrinsic>(II);
1767            if (MI->isVolatile() || MI->getRawDest() != PI)
1768              return false;
1769          }
1770          // fall through
1771          case Intrinsic::dbg_declare:
1772          case Intrinsic::dbg_value:
1773          case Intrinsic::invariant_start:
1774          case Intrinsic::invariant_end:
1775          case Intrinsic::lifetime_start:
1776          case Intrinsic::lifetime_end:
1777          case Intrinsic::objectsize:
1778            Users.push_back(I);
1779            continue;
1780          }
1781        }
1782
1783        if (isFreeCall(I, TLI)) {
1784          Users.push_back(I);
1785          continue;
1786        }
1787        return false;
1788
1789      case Instruction::Store: {
1790        StoreInst *SI = cast<StoreInst>(I);
1791        if (SI->isVolatile() || SI->getPointerOperand() != PI)
1792          return false;
1793        Users.push_back(I);
1794        continue;
1795      }
1796      }
1797      llvm_unreachable("missing a return?");
1798    }
1799  } while (!Worklist.empty());
1800  return true;
1801}
1802
1803Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
1804  // If we have a malloc call which is only used in any amount of comparisons
1805  // to null and free calls, delete the calls and replace the comparisons with
1806  // true or false as appropriate.
1807  SmallVector<WeakVH, 64> Users;
1808  if (isAllocSiteRemovable(&MI, Users, TLI)) {
1809    for (unsigned i = 0, e = Users.size(); i != e; ++i) {
1810      Instruction *I = cast_or_null<Instruction>(&*Users[i]);
1811      if (!I) continue;
1812
1813      if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
1814        ReplaceInstUsesWith(*C,
1815                            ConstantInt::get(Type::getInt1Ty(C->getContext()),
1816                                             C->isFalseWhenEqual()));
1817      } else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
1818        ReplaceInstUsesWith(*I, UndefValue::get(I->getType()));
1819      } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1820        if (II->getIntrinsicID() == Intrinsic::objectsize) {
1821          ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
1822          uint64_t DontKnow = CI->isZero() ? -1ULL : 0;
1823          ReplaceInstUsesWith(*I, ConstantInt::get(I->getType(), DontKnow));
1824        }
1825      }
1826      EraseInstFromFunction(*I);
1827    }
1828
1829    if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
1830      // Replace invoke with a NOP intrinsic to maintain the original CFG
1831      Module *M = II->getParent()->getParent()->getParent();
1832      Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
1833      InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
1834                         None, "", II->getParent());
1835    }
1836    return EraseInstFromFunction(MI);
1837  }
1838  return nullptr;
1839}
1840
1841/// \brief Move the call to free before a NULL test.
1842///
1843/// Check if this free is accessed after its argument has been test
1844/// against NULL (property 0).
1845/// If yes, it is legal to move this call in its predecessor block.
1846///
1847/// The move is performed only if the block containing the call to free
1848/// will be removed, i.e.:
1849/// 1. it has only one predecessor P, and P has two successors
1850/// 2. it contains the call and an unconditional branch
1851/// 3. its successor is the same as its predecessor's successor
1852///
1853/// The profitability is out-of concern here and this function should
1854/// be called only if the caller knows this transformation would be
1855/// profitable (e.g., for code size).
1856static Instruction *
1857tryToMoveFreeBeforeNullTest(CallInst &FI) {
1858  Value *Op = FI.getArgOperand(0);
1859  BasicBlock *FreeInstrBB = FI.getParent();
1860  BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
1861
1862  // Validate part of constraint #1: Only one predecessor
1863  // FIXME: We can extend the number of predecessor, but in that case, we
1864  //        would duplicate the call to free in each predecessor and it may
1865  //        not be profitable even for code size.
1866  if (!PredBB)
1867    return nullptr;
1868
1869  // Validate constraint #2: Does this block contains only the call to
1870  //                         free and an unconditional branch?
1871  // FIXME: We could check if we can speculate everything in the
1872  //        predecessor block
1873  if (FreeInstrBB->size() != 2)
1874    return nullptr;
1875  BasicBlock *SuccBB;
1876  if (!match(FreeInstrBB->getTerminator(), m_UnconditionalBr(SuccBB)))
1877    return nullptr;
1878
1879  // Validate the rest of constraint #1 by matching on the pred branch.
1880  TerminatorInst *TI = PredBB->getTerminator();
1881  BasicBlock *TrueBB, *FalseBB;
1882  ICmpInst::Predicate Pred;
1883  if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Op), m_Zero()), TrueBB, FalseBB)))
1884    return nullptr;
1885  if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
1886    return nullptr;
1887
1888  // Validate constraint #3: Ensure the null case just falls through.
1889  if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
1890    return nullptr;
1891  assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
1892         "Broken CFG: missing edge from predecessor to successor");
1893
1894  FI.moveBefore(TI);
1895  return &FI;
1896}
1897
1898
1899Instruction *InstCombiner::visitFree(CallInst &FI) {
1900  Value *Op = FI.getArgOperand(0);
1901
1902  // free undef -> unreachable.
1903  if (isa<UndefValue>(Op)) {
1904    // Insert a new store to null because we cannot modify the CFG here.
1905    Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
1906                         UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
1907    return EraseInstFromFunction(FI);
1908  }
1909
1910  // If we have 'free null' delete the instruction.  This can happen in stl code
1911  // when lots of inlining happens.
1912  if (isa<ConstantPointerNull>(Op))
1913    return EraseInstFromFunction(FI);
1914
1915  // If we optimize for code size, try to move the call to free before the null
1916  // test so that simplify cfg can remove the empty block and dead code
1917  // elimination the branch. I.e., helps to turn something like:
1918  // if (foo) free(foo);
1919  // into
1920  // free(foo);
1921  if (MinimizeSize)
1922    if (Instruction *I = tryToMoveFreeBeforeNullTest(FI))
1923      return I;
1924
1925  return nullptr;
1926}
1927
1928
1929
1930Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
1931  // Change br (not X), label True, label False to: br X, label False, True
1932  Value *X = nullptr;
1933  BasicBlock *TrueDest;
1934  BasicBlock *FalseDest;
1935  if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
1936      !isa<Constant>(X)) {
1937    // Swap Destinations and condition...
1938    BI.setCondition(X);
1939    BI.swapSuccessors();
1940    return &BI;
1941  }
1942
1943  // Canonicalize fcmp_one -> fcmp_oeq
1944  FCmpInst::Predicate FPred; Value *Y;
1945  if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
1946                             TrueDest, FalseDest)) &&
1947      BI.getCondition()->hasOneUse())
1948    if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
1949        FPred == FCmpInst::FCMP_OGE) {
1950      FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
1951      Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
1952
1953      // Swap Destinations and condition.
1954      BI.swapSuccessors();
1955      Worklist.Add(Cond);
1956      return &BI;
1957    }
1958
1959  // Canonicalize icmp_ne -> icmp_eq
1960  ICmpInst::Predicate IPred;
1961  if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
1962                      TrueDest, FalseDest)) &&
1963      BI.getCondition()->hasOneUse())
1964    if (IPred == ICmpInst::ICMP_NE  || IPred == ICmpInst::ICMP_ULE ||
1965        IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
1966        IPred == ICmpInst::ICMP_SGE) {
1967      ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
1968      Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
1969      // Swap Destinations and condition.
1970      BI.swapSuccessors();
1971      Worklist.Add(Cond);
1972      return &BI;
1973    }
1974
1975  return nullptr;
1976}
1977
1978Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
1979  Value *Cond = SI.getCondition();
1980  if (Instruction *I = dyn_cast<Instruction>(Cond)) {
1981    if (I->getOpcode() == Instruction::Add)
1982      if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
1983        // change 'switch (X+4) case 1:' into 'switch (X) case -3'
1984        // Skip the first item since that's the default case.
1985        for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
1986             i != e; ++i) {
1987          ConstantInt* CaseVal = i.getCaseValue();
1988          Constant* NewCaseVal = ConstantExpr::getSub(cast<Constant>(CaseVal),
1989                                                      AddRHS);
1990          assert(isa<ConstantInt>(NewCaseVal) &&
1991                 "Result of expression should be constant");
1992          i.setValue(cast<ConstantInt>(NewCaseVal));
1993        }
1994        SI.setCondition(I->getOperand(0));
1995        Worklist.Add(I);
1996        return &SI;
1997      }
1998  }
1999  return nullptr;
2000}
2001
2002Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
2003  Value *Agg = EV.getAggregateOperand();
2004
2005  if (!EV.hasIndices())
2006    return ReplaceInstUsesWith(EV, Agg);
2007
2008  if (Constant *C = dyn_cast<Constant>(Agg)) {
2009    if (Constant *C2 = C->getAggregateElement(*EV.idx_begin())) {
2010      if (EV.getNumIndices() == 0)
2011        return ReplaceInstUsesWith(EV, C2);
2012      // Extract the remaining indices out of the constant indexed by the
2013      // first index
2014      return ExtractValueInst::Create(C2, EV.getIndices().slice(1));
2015    }
2016    return nullptr; // Can't handle other constants
2017  }
2018
2019  if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
2020    // We're extracting from an insertvalue instruction, compare the indices
2021    const unsigned *exti, *exte, *insi, *inse;
2022    for (exti = EV.idx_begin(), insi = IV->idx_begin(),
2023         exte = EV.idx_end(), inse = IV->idx_end();
2024         exti != exte && insi != inse;
2025         ++exti, ++insi) {
2026      if (*insi != *exti)
2027        // The insert and extract both reference distinctly different elements.
2028        // This means the extract is not influenced by the insert, and we can
2029        // replace the aggregate operand of the extract with the aggregate
2030        // operand of the insert. i.e., replace
2031        // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
2032        // %E = extractvalue { i32, { i32 } } %I, 0
2033        // with
2034        // %E = extractvalue { i32, { i32 } } %A, 0
2035        return ExtractValueInst::Create(IV->getAggregateOperand(),
2036                                        EV.getIndices());
2037    }
2038    if (exti == exte && insi == inse)
2039      // Both iterators are at the end: Index lists are identical. Replace
2040      // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
2041      // %C = extractvalue { i32, { i32 } } %B, 1, 0
2042      // with "i32 42"
2043      return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
2044    if (exti == exte) {
2045      // The extract list is a prefix of the insert list. i.e. replace
2046      // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
2047      // %E = extractvalue { i32, { i32 } } %I, 1
2048      // with
2049      // %X = extractvalue { i32, { i32 } } %A, 1
2050      // %E = insertvalue { i32 } %X, i32 42, 0
2051      // by switching the order of the insert and extract (though the
2052      // insertvalue should be left in, since it may have other uses).
2053      Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
2054                                                 EV.getIndices());
2055      return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
2056                                     makeArrayRef(insi, inse));
2057    }
2058    if (insi == inse)
2059      // The insert list is a prefix of the extract list
2060      // We can simply remove the common indices from the extract and make it
2061      // operate on the inserted value instead of the insertvalue result.
2062      // i.e., replace
2063      // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
2064      // %E = extractvalue { i32, { i32 } } %I, 1, 0
2065      // with
2066      // %E extractvalue { i32 } { i32 42 }, 0
2067      return ExtractValueInst::Create(IV->getInsertedValueOperand(),
2068                                      makeArrayRef(exti, exte));
2069  }
2070  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
2071    // We're extracting from an intrinsic, see if we're the only user, which
2072    // allows us to simplify multiple result intrinsics to simpler things that
2073    // just get one value.
2074    if (II->hasOneUse()) {
2075      // Check if we're grabbing the overflow bit or the result of a 'with
2076      // overflow' intrinsic.  If it's the latter we can remove the intrinsic
2077      // and replace it with a traditional binary instruction.
2078      switch (II->getIntrinsicID()) {
2079      case Intrinsic::uadd_with_overflow:
2080      case Intrinsic::sadd_with_overflow:
2081        if (*EV.idx_begin() == 0) {  // Normal result.
2082          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
2083          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
2084          EraseInstFromFunction(*II);
2085          return BinaryOperator::CreateAdd(LHS, RHS);
2086        }
2087
2088        // If the normal result of the add is dead, and the RHS is a constant,
2089        // we can transform this into a range comparison.
2090        // overflow = uadd a, -4  -->  overflow = icmp ugt a, 3
2091        if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
2092          if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
2093            return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
2094                                ConstantExpr::getNot(CI));
2095        break;
2096      case Intrinsic::usub_with_overflow:
2097      case Intrinsic::ssub_with_overflow:
2098        if (*EV.idx_begin() == 0) {  // Normal result.
2099          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
2100          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
2101          EraseInstFromFunction(*II);
2102          return BinaryOperator::CreateSub(LHS, RHS);
2103        }
2104        break;
2105      case Intrinsic::umul_with_overflow:
2106      case Intrinsic::smul_with_overflow:
2107        if (*EV.idx_begin() == 0) {  // Normal result.
2108          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
2109          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
2110          EraseInstFromFunction(*II);
2111          return BinaryOperator::CreateMul(LHS, RHS);
2112        }
2113        break;
2114      default:
2115        break;
2116      }
2117    }
2118  }
2119  if (LoadInst *L = dyn_cast<LoadInst>(Agg))
2120    // If the (non-volatile) load only has one use, we can rewrite this to a
2121    // load from a GEP. This reduces the size of the load.
2122    // FIXME: If a load is used only by extractvalue instructions then this
2123    //        could be done regardless of having multiple uses.
2124    if (L->isSimple() && L->hasOneUse()) {
2125      // extractvalue has integer indices, getelementptr has Value*s. Convert.
2126      SmallVector<Value*, 4> Indices;
2127      // Prefix an i32 0 since we need the first element.
2128      Indices.push_back(Builder->getInt32(0));
2129      for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
2130            I != E; ++I)
2131        Indices.push_back(Builder->getInt32(*I));
2132
2133      // We need to insert these at the location of the old load, not at that of
2134      // the extractvalue.
2135      Builder->SetInsertPoint(L->getParent(), L);
2136      Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(), Indices);
2137      // Returning the load directly will cause the main loop to insert it in
2138      // the wrong spot, so use ReplaceInstUsesWith().
2139      return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP));
2140    }
2141  // We could simplify extracts from other values. Note that nested extracts may
2142  // already be simplified implicitly by the above: extract (extract (insert) )
2143  // will be translated into extract ( insert ( extract ) ) first and then just
2144  // the value inserted, if appropriate. Similarly for extracts from single-use
2145  // loads: extract (extract (load)) will be translated to extract (load (gep))
2146  // and if again single-use then via load (gep (gep)) to load (gep).
2147  // However, double extracts from e.g. function arguments or return values
2148  // aren't handled yet.
2149  return nullptr;
2150}
2151
2152enum Personality_Type {
2153  Unknown_Personality,
2154  GNU_Ada_Personality,
2155  GNU_CXX_Personality,
2156  GNU_ObjC_Personality
2157};
2158
2159/// RecognizePersonality - See if the given exception handling personality
2160/// function is one that we understand.  If so, return a description of it;
2161/// otherwise return Unknown_Personality.
2162static Personality_Type RecognizePersonality(Value *Pers) {
2163  Function *F = dyn_cast<Function>(Pers->stripPointerCasts());
2164  if (!F)
2165    return Unknown_Personality;
2166  return StringSwitch<Personality_Type>(F->getName())
2167    .Case("__gnat_eh_personality", GNU_Ada_Personality)
2168    .Case("__gxx_personality_v0",  GNU_CXX_Personality)
2169    .Case("__objc_personality_v0", GNU_ObjC_Personality)
2170    .Default(Unknown_Personality);
2171}
2172
2173/// isCatchAll - Return 'true' if the given typeinfo will match anything.
2174static bool isCatchAll(Personality_Type Personality, Constant *TypeInfo) {
2175  switch (Personality) {
2176  case Unknown_Personality:
2177    return false;
2178  case GNU_Ada_Personality:
2179    // While __gnat_all_others_value will match any Ada exception, it doesn't
2180    // match foreign exceptions (or didn't, before gcc-4.7).
2181    return false;
2182  case GNU_CXX_Personality:
2183  case GNU_ObjC_Personality:
2184    return TypeInfo->isNullValue();
2185  }
2186  llvm_unreachable("Unknown personality!");
2187}
2188
2189static bool shorter_filter(const Value *LHS, const Value *RHS) {
2190  return
2191    cast<ArrayType>(LHS->getType())->getNumElements()
2192  <
2193    cast<ArrayType>(RHS->getType())->getNumElements();
2194}
2195
2196Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
2197  // The logic here should be correct for any real-world personality function.
2198  // However if that turns out not to be true, the offending logic can always
2199  // be conditioned on the personality function, like the catch-all logic is.
2200  Personality_Type Personality = RecognizePersonality(LI.getPersonalityFn());
2201
2202  // Simplify the list of clauses, eg by removing repeated catch clauses
2203  // (these are often created by inlining).
2204  bool MakeNewInstruction = false; // If true, recreate using the following:
2205  SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
2206  bool CleanupFlag = LI.isCleanup();   // - The new instruction is a cleanup.
2207
2208  SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
2209  for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
2210    bool isLastClause = i + 1 == e;
2211    if (LI.isCatch(i)) {
2212      // A catch clause.
2213      Constant *CatchClause = LI.getClause(i);
2214      Constant *TypeInfo = CatchClause->stripPointerCasts();
2215
2216      // If we already saw this clause, there is no point in having a second
2217      // copy of it.
2218      if (AlreadyCaught.insert(TypeInfo)) {
2219        // This catch clause was not already seen.
2220        NewClauses.push_back(CatchClause);
2221      } else {
2222        // Repeated catch clause - drop the redundant copy.
2223        MakeNewInstruction = true;
2224      }
2225
2226      // If this is a catch-all then there is no point in keeping any following
2227      // clauses or marking the landingpad as having a cleanup.
2228      if (isCatchAll(Personality, TypeInfo)) {
2229        if (!isLastClause)
2230          MakeNewInstruction = true;
2231        CleanupFlag = false;
2232        break;
2233      }
2234    } else {
2235      // A filter clause.  If any of the filter elements were already caught
2236      // then they can be dropped from the filter.  It is tempting to try to
2237      // exploit the filter further by saying that any typeinfo that does not
2238      // occur in the filter can't be caught later (and thus can be dropped).
2239      // However this would be wrong, since typeinfos can match without being
2240      // equal (for example if one represents a C++ class, and the other some
2241      // class derived from it).
2242      assert(LI.isFilter(i) && "Unsupported landingpad clause!");
2243      Constant *FilterClause = LI.getClause(i);
2244      ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
2245      unsigned NumTypeInfos = FilterType->getNumElements();
2246
2247      // An empty filter catches everything, so there is no point in keeping any
2248      // following clauses or marking the landingpad as having a cleanup.  By
2249      // dealing with this case here the following code is made a bit simpler.
2250      if (!NumTypeInfos) {
2251        NewClauses.push_back(FilterClause);
2252        if (!isLastClause)
2253          MakeNewInstruction = true;
2254        CleanupFlag = false;
2255        break;
2256      }
2257
2258      bool MakeNewFilter = false; // If true, make a new filter.
2259      SmallVector<Constant *, 16> NewFilterElts; // New elements.
2260      if (isa<ConstantAggregateZero>(FilterClause)) {
2261        // Not an empty filter - it contains at least one null typeinfo.
2262        assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
2263        Constant *TypeInfo =
2264          Constant::getNullValue(FilterType->getElementType());
2265        // If this typeinfo is a catch-all then the filter can never match.
2266        if (isCatchAll(Personality, TypeInfo)) {
2267          // Throw the filter away.
2268          MakeNewInstruction = true;
2269          continue;
2270        }
2271
2272        // There is no point in having multiple copies of this typeinfo, so
2273        // discard all but the first copy if there is more than one.
2274        NewFilterElts.push_back(TypeInfo);
2275        if (NumTypeInfos > 1)
2276          MakeNewFilter = true;
2277      } else {
2278        ConstantArray *Filter = cast<ConstantArray>(FilterClause);
2279        SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
2280        NewFilterElts.reserve(NumTypeInfos);
2281
2282        // Remove any filter elements that were already caught or that already
2283        // occurred in the filter.  While there, see if any of the elements are
2284        // catch-alls.  If so, the filter can be discarded.
2285        bool SawCatchAll = false;
2286        for (unsigned j = 0; j != NumTypeInfos; ++j) {
2287          Constant *Elt = Filter->getOperand(j);
2288          Constant *TypeInfo = Elt->stripPointerCasts();
2289          if (isCatchAll(Personality, TypeInfo)) {
2290            // This element is a catch-all.  Bail out, noting this fact.
2291            SawCatchAll = true;
2292            break;
2293          }
2294          if (AlreadyCaught.count(TypeInfo))
2295            // Already caught by an earlier clause, so having it in the filter
2296            // is pointless.
2297            continue;
2298          // There is no point in having multiple copies of the same typeinfo in
2299          // a filter, so only add it if we didn't already.
2300          if (SeenInFilter.insert(TypeInfo))
2301            NewFilterElts.push_back(cast<Constant>(Elt));
2302        }
2303        // A filter containing a catch-all cannot match anything by definition.
2304        if (SawCatchAll) {
2305          // Throw the filter away.
2306          MakeNewInstruction = true;
2307          continue;
2308        }
2309
2310        // If we dropped something from the filter, make a new one.
2311        if (NewFilterElts.size() < NumTypeInfos)
2312          MakeNewFilter = true;
2313      }
2314      if (MakeNewFilter) {
2315        FilterType = ArrayType::get(FilterType->getElementType(),
2316                                    NewFilterElts.size());
2317        FilterClause = ConstantArray::get(FilterType, NewFilterElts);
2318        MakeNewInstruction = true;
2319      }
2320
2321      NewClauses.push_back(FilterClause);
2322
2323      // If the new filter is empty then it will catch everything so there is
2324      // no point in keeping any following clauses or marking the landingpad
2325      // as having a cleanup.  The case of the original filter being empty was
2326      // already handled above.
2327      if (MakeNewFilter && !NewFilterElts.size()) {
2328        assert(MakeNewInstruction && "New filter but not a new instruction!");
2329        CleanupFlag = false;
2330        break;
2331      }
2332    }
2333  }
2334
2335  // If several filters occur in a row then reorder them so that the shortest
2336  // filters come first (those with the smallest number of elements).  This is
2337  // advantageous because shorter filters are more likely to match, speeding up
2338  // unwinding, but mostly because it increases the effectiveness of the other
2339  // filter optimizations below.
2340  for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
2341    unsigned j;
2342    // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
2343    for (j = i; j != e; ++j)
2344      if (!isa<ArrayType>(NewClauses[j]->getType()))
2345        break;
2346
2347    // Check whether the filters are already sorted by length.  We need to know
2348    // if sorting them is actually going to do anything so that we only make a
2349    // new landingpad instruction if it does.
2350    for (unsigned k = i; k + 1 < j; ++k)
2351      if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
2352        // Not sorted, so sort the filters now.  Doing an unstable sort would be
2353        // correct too but reordering filters pointlessly might confuse users.
2354        std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
2355                         shorter_filter);
2356        MakeNewInstruction = true;
2357        break;
2358      }
2359
2360    // Look for the next batch of filters.
2361    i = j + 1;
2362  }
2363
2364  // If typeinfos matched if and only if equal, then the elements of a filter L
2365  // that occurs later than a filter F could be replaced by the intersection of
2366  // the elements of F and L.  In reality two typeinfos can match without being
2367  // equal (for example if one represents a C++ class, and the other some class
2368  // derived from it) so it would be wrong to perform this transform in general.
2369  // However the transform is correct and useful if F is a subset of L.  In that
2370  // case L can be replaced by F, and thus removed altogether since repeating a
2371  // filter is pointless.  So here we look at all pairs of filters F and L where
2372  // L follows F in the list of clauses, and remove L if every element of F is
2373  // an element of L.  This can occur when inlining C++ functions with exception
2374  // specifications.
2375  for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
2376    // Examine each filter in turn.
2377    Value *Filter = NewClauses[i];
2378    ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
2379    if (!FTy)
2380      // Not a filter - skip it.
2381      continue;
2382    unsigned FElts = FTy->getNumElements();
2383    // Examine each filter following this one.  Doing this backwards means that
2384    // we don't have to worry about filters disappearing under us when removed.
2385    for (unsigned j = NewClauses.size() - 1; j != i; --j) {
2386      Value *LFilter = NewClauses[j];
2387      ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
2388      if (!LTy)
2389        // Not a filter - skip it.
2390        continue;
2391      // If Filter is a subset of LFilter, i.e. every element of Filter is also
2392      // an element of LFilter, then discard LFilter.
2393      SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
2394      // If Filter is empty then it is a subset of LFilter.
2395      if (!FElts) {
2396        // Discard LFilter.
2397        NewClauses.erase(J);
2398        MakeNewInstruction = true;
2399        // Move on to the next filter.
2400        continue;
2401      }
2402      unsigned LElts = LTy->getNumElements();
2403      // If Filter is longer than LFilter then it cannot be a subset of it.
2404      if (FElts > LElts)
2405        // Move on to the next filter.
2406        continue;
2407      // At this point we know that LFilter has at least one element.
2408      if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
2409        // Filter is a subset of LFilter iff Filter contains only zeros (as we
2410        // already know that Filter is not longer than LFilter).
2411        if (isa<ConstantAggregateZero>(Filter)) {
2412          assert(FElts <= LElts && "Should have handled this case earlier!");
2413          // Discard LFilter.
2414          NewClauses.erase(J);
2415          MakeNewInstruction = true;
2416        }
2417        // Move on to the next filter.
2418        continue;
2419      }
2420      ConstantArray *LArray = cast<ConstantArray>(LFilter);
2421      if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
2422        // Since Filter is non-empty and contains only zeros, it is a subset of
2423        // LFilter iff LFilter contains a zero.
2424        assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
2425        for (unsigned l = 0; l != LElts; ++l)
2426          if (LArray->getOperand(l)->isNullValue()) {
2427            // LFilter contains a zero - discard it.
2428            NewClauses.erase(J);
2429            MakeNewInstruction = true;
2430            break;
2431          }
2432        // Move on to the next filter.
2433        continue;
2434      }
2435      // At this point we know that both filters are ConstantArrays.  Loop over
2436      // operands to see whether every element of Filter is also an element of
2437      // LFilter.  Since filters tend to be short this is probably faster than
2438      // using a method that scales nicely.
2439      ConstantArray *FArray = cast<ConstantArray>(Filter);
2440      bool AllFound = true;
2441      for (unsigned f = 0; f != FElts; ++f) {
2442        Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
2443        AllFound = false;
2444        for (unsigned l = 0; l != LElts; ++l) {
2445          Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
2446          if (LTypeInfo == FTypeInfo) {
2447            AllFound = true;
2448            break;
2449          }
2450        }
2451        if (!AllFound)
2452          break;
2453      }
2454      if (AllFound) {
2455        // Discard LFilter.
2456        NewClauses.erase(J);
2457        MakeNewInstruction = true;
2458      }
2459      // Move on to the next filter.
2460    }
2461  }
2462
2463  // If we changed any of the clauses, replace the old landingpad instruction
2464  // with a new one.
2465  if (MakeNewInstruction) {
2466    LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
2467                                                 LI.getPersonalityFn(),
2468                                                 NewClauses.size());
2469    for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
2470      NLI->addClause(NewClauses[i]);
2471    // A landing pad with no clauses must have the cleanup flag set.  It is
2472    // theoretically possible, though highly unlikely, that we eliminated all
2473    // clauses.  If so, force the cleanup flag to true.
2474    if (NewClauses.empty())
2475      CleanupFlag = true;
2476    NLI->setCleanup(CleanupFlag);
2477    return NLI;
2478  }
2479
2480  // Even if none of the clauses changed, we may nonetheless have understood
2481  // that the cleanup flag is pointless.  Clear it if so.
2482  if (LI.isCleanup() != CleanupFlag) {
2483    assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
2484    LI.setCleanup(CleanupFlag);
2485    return &LI;
2486  }
2487
2488  return nullptr;
2489}
2490
2491
2492
2493
2494/// TryToSinkInstruction - Try to move the specified instruction from its
2495/// current block into the beginning of DestBlock, which can only happen if it's
2496/// safe to move the instruction past all of the instructions between it and the
2497/// end of its block.
2498static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
2499  assert(I->hasOneUse() && "Invariants didn't hold!");
2500
2501  // Cannot move control-flow-involving, volatile loads, vaarg, etc.
2502  if (isa<PHINode>(I) || isa<LandingPadInst>(I) || I->mayHaveSideEffects() ||
2503      isa<TerminatorInst>(I))
2504    return false;
2505
2506  // Do not sink alloca instructions out of the entry block.
2507  if (isa<AllocaInst>(I) && I->getParent() ==
2508        &DestBlock->getParent()->getEntryBlock())
2509    return false;
2510
2511  // We can only sink load instructions if there is nothing between the load and
2512  // the end of block that could change the value.
2513  if (I->mayReadFromMemory()) {
2514    for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
2515         Scan != E; ++Scan)
2516      if (Scan->mayWriteToMemory())
2517        return false;
2518  }
2519
2520  BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
2521  I->moveBefore(InsertPos);
2522  ++NumSunkInst;
2523  return true;
2524}
2525
2526
2527/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
2528/// all reachable code to the worklist.
2529///
2530/// This has a couple of tricks to make the code faster and more powerful.  In
2531/// particular, we constant fold and DCE instructions as we go, to avoid adding
2532/// them to the worklist (this significantly speeds up instcombine on code where
2533/// many instructions are dead or constant).  Additionally, if we find a branch
2534/// whose condition is a known constant, we only visit the reachable successors.
2535///
2536static bool AddReachableCodeToWorklist(BasicBlock *BB,
2537                                       SmallPtrSet<BasicBlock*, 64> &Visited,
2538                                       InstCombiner &IC,
2539                                       const DataLayout *DL,
2540                                       const TargetLibraryInfo *TLI) {
2541  bool MadeIRChange = false;
2542  SmallVector<BasicBlock*, 256> Worklist;
2543  Worklist.push_back(BB);
2544
2545  SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
2546  DenseMap<ConstantExpr*, Constant*> FoldedConstants;
2547
2548  do {
2549    BB = Worklist.pop_back_val();
2550
2551    // We have now visited this block!  If we've already been here, ignore it.
2552    if (!Visited.insert(BB)) continue;
2553
2554    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
2555      Instruction *Inst = BBI++;
2556
2557      // DCE instruction if trivially dead.
2558      if (isInstructionTriviallyDead(Inst, TLI)) {
2559        ++NumDeadInst;
2560        DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
2561        Inst->eraseFromParent();
2562        continue;
2563      }
2564
2565      // ConstantProp instruction if trivially constant.
2566      if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
2567        if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) {
2568          DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: "
2569                       << *Inst << '\n');
2570          Inst->replaceAllUsesWith(C);
2571          ++NumConstProp;
2572          Inst->eraseFromParent();
2573          continue;
2574        }
2575
2576      if (DL) {
2577        // See if we can constant fold its operands.
2578        for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
2579             i != e; ++i) {
2580          ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
2581          if (CE == nullptr) continue;
2582
2583          Constant*& FoldRes = FoldedConstants[CE];
2584          if (!FoldRes)
2585            FoldRes = ConstantFoldConstantExpression(CE, DL, TLI);
2586          if (!FoldRes)
2587            FoldRes = CE;
2588
2589          if (FoldRes != CE) {
2590            *i = FoldRes;
2591            MadeIRChange = true;
2592          }
2593        }
2594      }
2595
2596      InstrsForInstCombineWorklist.push_back(Inst);
2597    }
2598
2599    // Recursively visit successors.  If this is a branch or switch on a
2600    // constant, only visit the reachable successor.
2601    TerminatorInst *TI = BB->getTerminator();
2602    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
2603      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
2604        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
2605        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
2606        Worklist.push_back(ReachableBB);
2607        continue;
2608      }
2609    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
2610      if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
2611        // See if this is an explicit destination.
2612        for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
2613             i != e; ++i)
2614          if (i.getCaseValue() == Cond) {
2615            BasicBlock *ReachableBB = i.getCaseSuccessor();
2616            Worklist.push_back(ReachableBB);
2617            continue;
2618          }
2619
2620        // Otherwise it is the default destination.
2621        Worklist.push_back(SI->getDefaultDest());
2622        continue;
2623      }
2624    }
2625
2626    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
2627      Worklist.push_back(TI->getSuccessor(i));
2628  } while (!Worklist.empty());
2629
2630  // Once we've found all of the instructions to add to instcombine's worklist,
2631  // add them in reverse order.  This way instcombine will visit from the top
2632  // of the function down.  This jives well with the way that it adds all uses
2633  // of instructions to the worklist after doing a transformation, thus avoiding
2634  // some N^2 behavior in pathological cases.
2635  IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
2636                              InstrsForInstCombineWorklist.size());
2637
2638  return MadeIRChange;
2639}
2640
2641bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
2642  MadeIRChange = false;
2643
2644  DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
2645               << F.getName() << "\n");
2646
2647  {
2648    // Do a depth-first traversal of the function, populate the worklist with
2649    // the reachable instructions.  Ignore blocks that are not reachable.  Keep
2650    // track of which blocks we visit.
2651    SmallPtrSet<BasicBlock*, 64> Visited;
2652    MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, DL,
2653                                               TLI);
2654
2655    // Do a quick scan over the function.  If we find any blocks that are
2656    // unreachable, remove any instructions inside of them.  This prevents
2657    // the instcombine code from having to deal with some bad special cases.
2658    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
2659      if (Visited.count(BB)) continue;
2660
2661      // Delete the instructions backwards, as it has a reduced likelihood of
2662      // having to update as many def-use and use-def chains.
2663      Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2664      while (EndInst != BB->begin()) {
2665        // Delete the next to last instruction.
2666        BasicBlock::iterator I = EndInst;
2667        Instruction *Inst = --I;
2668        if (!Inst->use_empty())
2669          Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
2670        if (isa<LandingPadInst>(Inst)) {
2671          EndInst = Inst;
2672          continue;
2673        }
2674        if (!isa<DbgInfoIntrinsic>(Inst)) {
2675          ++NumDeadInst;
2676          MadeIRChange = true;
2677        }
2678        Inst->eraseFromParent();
2679      }
2680    }
2681  }
2682
2683  while (!Worklist.isEmpty()) {
2684    Instruction *I = Worklist.RemoveOne();
2685    if (I == nullptr) continue;  // skip null values.
2686
2687    // Check to see if we can DCE the instruction.
2688    if (isInstructionTriviallyDead(I, TLI)) {
2689      DEBUG(dbgs() << "IC: DCE: " << *I << '\n');
2690      EraseInstFromFunction(*I);
2691      ++NumDeadInst;
2692      MadeIRChange = true;
2693      continue;
2694    }
2695
2696    // Instruction isn't dead, see if we can constant propagate it.
2697    if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
2698      if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
2699        DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
2700
2701        // Add operands to the worklist.
2702        ReplaceInstUsesWith(*I, C);
2703        ++NumConstProp;
2704        EraseInstFromFunction(*I);
2705        MadeIRChange = true;
2706        continue;
2707      }
2708
2709    // See if we can trivially sink this instruction to a successor basic block.
2710    if (I->hasOneUse()) {
2711      BasicBlock *BB = I->getParent();
2712      Instruction *UserInst = cast<Instruction>(*I->user_begin());
2713      BasicBlock *UserParent;
2714
2715      // Get the block the use occurs in.
2716      if (PHINode *PN = dyn_cast<PHINode>(UserInst))
2717        UserParent = PN->getIncomingBlock(*I->use_begin());
2718      else
2719        UserParent = UserInst->getParent();
2720
2721      if (UserParent != BB) {
2722        bool UserIsSuccessor = false;
2723        // See if the user is one of our successors.
2724        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
2725          if (*SI == UserParent) {
2726            UserIsSuccessor = true;
2727            break;
2728          }
2729
2730        // If the user is one of our immediate successors, and if that successor
2731        // only has us as a predecessors (we'd have to split the critical edge
2732        // otherwise), we can keep going.
2733        if (UserIsSuccessor && UserParent->getSinglePredecessor())
2734          // Okay, the CFG is simple enough, try to sink this instruction.
2735          MadeIRChange |= TryToSinkInstruction(I, UserParent);
2736      }
2737    }
2738
2739    // Now that we have an instruction, try combining it to simplify it.
2740    Builder->SetInsertPoint(I->getParent(), I);
2741    Builder->SetCurrentDebugLocation(I->getDebugLoc());
2742
2743#ifndef NDEBUG
2744    std::string OrigI;
2745#endif
2746    DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
2747    DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
2748
2749    if (Instruction *Result = visit(*I)) {
2750      ++NumCombined;
2751      // Should we replace the old instruction with a new one?
2752      if (Result != I) {
2753        DEBUG(dbgs() << "IC: Old = " << *I << '\n'
2754                     << "    New = " << *Result << '\n');
2755
2756        if (!I->getDebugLoc().isUnknown())
2757          Result->setDebugLoc(I->getDebugLoc());
2758        // Everything uses the new instruction now.
2759        I->replaceAllUsesWith(Result);
2760
2761        // Move the name to the new instruction first.
2762        Result->takeName(I);
2763
2764        // Push the new instruction and any users onto the worklist.
2765        Worklist.Add(Result);
2766        Worklist.AddUsersToWorkList(*Result);
2767
2768        // Insert the new instruction into the basic block...
2769        BasicBlock *InstParent = I->getParent();
2770        BasicBlock::iterator InsertPos = I;
2771
2772        // If we replace a PHI with something that isn't a PHI, fix up the
2773        // insertion point.
2774        if (!isa<PHINode>(Result) && isa<PHINode>(InsertPos))
2775          InsertPos = InstParent->getFirstInsertionPt();
2776
2777        InstParent->getInstList().insert(InsertPos, Result);
2778
2779        EraseInstFromFunction(*I);
2780      } else {
2781#ifndef NDEBUG
2782        DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
2783                     << "    New = " << *I << '\n');
2784#endif
2785
2786        // If the instruction was modified, it's possible that it is now dead.
2787        // if so, remove it.
2788        if (isInstructionTriviallyDead(I, TLI)) {
2789          EraseInstFromFunction(*I);
2790        } else {
2791          Worklist.Add(I);
2792          Worklist.AddUsersToWorkList(*I);
2793        }
2794      }
2795      MadeIRChange = true;
2796    }
2797  }
2798
2799  Worklist.Zap();
2800  return MadeIRChange;
2801}
2802
2803namespace {
2804class InstCombinerLibCallSimplifier : public LibCallSimplifier {
2805  InstCombiner *IC;
2806public:
2807  InstCombinerLibCallSimplifier(const DataLayout *DL,
2808                                const TargetLibraryInfo *TLI,
2809                                InstCombiner *IC)
2810    : LibCallSimplifier(DL, TLI, UnsafeFPShrink) {
2811    this->IC = IC;
2812  }
2813
2814  /// replaceAllUsesWith - override so that instruction replacement
2815  /// can be defined in terms of the instruction combiner framework.
2816  void replaceAllUsesWith(Instruction *I, Value *With) const override {
2817    IC->ReplaceInstUsesWith(*I, With);
2818  }
2819};
2820}
2821
2822bool InstCombiner::runOnFunction(Function &F) {
2823  if (skipOptnoneFunction(F))
2824    return false;
2825
2826  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
2827  DL = DLP ? &DLP->getDataLayout() : nullptr;
2828  TLI = &getAnalysis<TargetLibraryInfo>();
2829  // Minimizing size?
2830  MinimizeSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
2831                                                Attribute::MinSize);
2832
2833  /// Builder - This is an IRBuilder that automatically inserts new
2834  /// instructions into the worklist when they are created.
2835  IRBuilder<true, TargetFolder, InstCombineIRInserter>
2836    TheBuilder(F.getContext(), TargetFolder(DL),
2837               InstCombineIRInserter(Worklist));
2838  Builder = &TheBuilder;
2839
2840  InstCombinerLibCallSimplifier TheSimplifier(DL, TLI, this);
2841  Simplifier = &TheSimplifier;
2842
2843  bool EverMadeChange = false;
2844
2845  // Lower dbg.declare intrinsics otherwise their value may be clobbered
2846  // by instcombiner.
2847  EverMadeChange = LowerDbgDeclare(F);
2848
2849  // Iterate while there is work to do.
2850  unsigned Iteration = 0;
2851  while (DoOneIteration(F, Iteration++))
2852    EverMadeChange = true;
2853
2854  Builder = nullptr;
2855  return EverMadeChange;
2856}
2857
2858FunctionPass *llvm::createInstructionCombiningPass() {
2859  return new InstCombiner();
2860}
2861