//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "InstCombine.h"
#include "llvm-c/Initialization.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <climits>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumExpand,    "Number of expansions");
STATISTIC(NumFactor   , "Number of factorizations");
STATISTIC(NumReassoc  , "Number of reassociations");

static cl::opt<bool> UnsafeFPShrink("enable-double-float-shrink", cl::Hidden,
                                   cl::init(false),
                                   cl::desc("Enable unsafe double to float "
                                            "shrinking for math lib calls"));

// Initialization Routines
void llvm::initializeInstCombine(PassRegistry &Registry) {
  initializeInstCombinerPass(Registry);
}

void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
  initializeInstCombine(*unwrap(R));
}

char InstCombiner::ID = 0;
INITIALIZE_PASS_BEGIN(InstCombiner, "instcombine",
                "Combine redundant instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(InstCombiner, "instcombine",
                "Combine redundant instructions", false, false)

void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<TargetLibraryInfo>();
}


Value *InstCombiner::EmitGEPOffset(User *GEP) {
  return llvm::EmitGEPOffset(Builder, *getDataLayout(), GEP);
}

/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'.  We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
  assert(From->isIntegerTy() && To->isIntegerTy());

  // If we don't have DL, we don't know if the source/dest are legal.
  if (!DL) return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  bool FromLegal = DL->isLegalInteger(FromWidth);
  bool ToLegal = DL->isLegalInteger(ToWidth);

  // If this is a legal integer from type, and the result would be an illegal
  // type, don't do the transformation.
  if (FromLegal && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}
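
// For example, with a DataLayout where i32 and i64 are legal integer types,
// i64 -> i32 and i160 -> i64 are acceptable conversions, but i32 -> i17
// (legal to illegal) and i64 -> i160 (widening to an illegal type) are not.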

// Return true if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where B and C must both be ConstantInts, results in a constant that does
// not overflow. This function only handles the Add and Sub opcodes. For
// all other opcodes, the function conservatively returns false.
static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap()) {
    return false;
  }

  // We reason about Add and Sub only.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add &&
      Opcode != Instruction::Sub) {
    return false;
  }

  ConstantInt *CB = dyn_cast<ConstantInt>(B);
  ConstantInt *CC = dyn_cast<ConstantInt>(C);

  if (!CB || !CC) {
    return false;
  }

  const APInt &BVal = CB->getValue();
  const APInt &CVal = CC->getValue();
  bool Overflow = false;

  if (Opcode == Instruction::Add) {
    BVal.sadd_ov(CVal, Overflow);
  } else {
    BVal.ssub_ov(CVal, Overflow);
  }

  return !Overflow;
}
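
// For example, when reassociating
//    %t = add nsw i32 %x, 3
//    %r = add nsw i32 %t, 5
// into "add i32 %x, 8", the nsw flag can be kept because 3 + 5 does not
// overflow i32; had the two constants summed past INT32_MAX, the flag would
// have to be dropped.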

/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation. We preserve fast-math flags when applicable, since they
/// remain valid across these transforms.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  FastMathFlags FMF = I.getFastMathFlags();
  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}

/// SimplifyAssociativeOrCommutative - This performs a few simplifications for
/// operators which are associative or commutative:
//
//  Commutative operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  Associative operators:
//
//  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
//  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
//
//  Associative and commutative operators:
//
//  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
//  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
//  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
//     if C1 and C2 are constants.
//
bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex).  This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
        getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = SimplifyBinOp(Opcode, B, C, DL)) {
          // It simplifies to V.  Form "A op V".
          I.setOperand(0, A);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          if (MaintainNoSignedWrap(I, B, C) &&
              (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
            // Note: this is only valid because SimplifyBinOp doesn't look at
            // the operands to Op0.
            I.clearSubclassOptionalData();
            I.setHasNoSignedWrap(true);
          } else {
            ClearSubclassDataAfterReassociation(I);
          }

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = SimplifyBinOp(Opcode, A, B, DL)) {
          // It simplifies to V.  Form "V op C".
          I.setOperand(0, V);
          I.setOperand(1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
          // It simplifies to V.  Form "V op B".
          I.setOperand(0, V);
          I.setOperand(1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
          // It simplifies to V.  Form "B op V".
          I.setOperand(0, B);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          isa<Constant>(Op0->getOperand(1)) &&
          isa<Constant>(Op1->getOperand(1)) &&
          Op0->hasOneUse() && Op1->hasOneUse()) {
        Value *A = Op0->getOperand(0);
        Constant *C1 = cast<Constant>(Op0->getOperand(1));
        Value *B = Op1->getOperand(0);
        Constant *C2 = cast<Constant>(Op1->getOperand(1));

        Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
        BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
        if (isa<FPMathOperator>(New)) {
          FastMathFlags Flags = I.getFastMathFlags();
          Flags &= Op0->getFastMathFlags();
          Flags &= Op1->getFastMathFlags();
          New->setFastMathFlags(Flags);
        }
        InsertNewInstWith(New, I);
        New->takeName(Op1);
        I.setOperand(0, New);
        I.setOperand(1, Folded);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        ClearSubclassDataAfterReassociation(I);

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (1);
}
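
// For example, transform 6 above rewrites
//    %lhs = add i32 %a, 10
//    %rhs = add i32 %b, 32
//    %r   = add i32 %lhs, %rhs
// into
//    %sum = add i32 %a, %b
//    %r   = add i32 %sum, 42
// provided %lhs and %rhs have no other uses.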

/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  switch (LOp) {
  default:
    return false;

  case Instruction::And:
    // And distributes over Or and Xor.
    switch (ROp) {
    default:
      return false;
    case Instruction::Or:
    case Instruction::Xor:
      return true;
    }

  case Instruction::Mul:
    // Multiplication distributes over addition and subtraction.
    switch (ROp) {
    default:
      return false;
    case Instruction::Add:
    case Instruction::Sub:
      return true;
    }

  case Instruction::Or:
    // Or distributes over And.
    switch (ROp) {
    default:
      return false;
    case Instruction::And:
      return true;
    }
  }
}
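
// For example, LeftDistributesOverRight(Mul, Add) is true because
// "X * (Y + Z)" always equals "(X * Y) + (X * Z)".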

/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return LeftDistributesOverRight(ROp, LOp);
  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
  return false;
}

/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
/// which some other binary operation distributes over either by factorizing
/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this
/// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is
/// a win).  Returns the simplified value, or null if it didn't simplify.
Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op

  // Factorization.
  if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) {
    // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
    // a common term.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
    Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Does "X op' Y" always equal "Y op' X"?
    bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

    // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
    if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
      // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
      // commutative case, "(A op' B) op (C op' A)"?
      if (A == C || (InnerCommutative && A == D)) {
        if (A != C)
          std::swap(C, D);
        // Consider forming "A op' (B op D)".
        // If "B op D" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, B, D, DL);
        // If "B op D" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName());
        if (V) {
          ++NumFactor;
          V = Builder->CreateBinOp(InnerOpcode, A, V);
          V->takeName(&I);
          return V;
        }
      }

    // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
    if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
      // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
      // commutative case, "(A op' B) op (B op' D)"?
      if (B == D || (InnerCommutative && B == C)) {
        if (B != D)
          std::swap(C, D);
        // Consider forming "(A op C) op' B".
        // If "A op C" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, A, C, DL);
        // If "A op C" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName());
        if (V) {
          ++NumFactor;
          V = Builder->CreateBinOp(InnerOpcode, V, B);
          V->takeName(&I);
          return V;
        }
      }
  }

  // Expansion.
  if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C".  See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Do "A op C" and "B op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, DL))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, DL)) {
        // They do! Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
        if ((L == A && R == B) ||
            (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
          return Op0;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
          return V;
        // Otherwise, create a new instruction.
        C = Builder->CreateBinOp(InnerOpcode, L, R);
        C->takeName(&I);
        return C;
      }
  }

  if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)".  See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Do "A op B" and "A op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, DL))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, DL)) {
        // They do! Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
        if ((L == B && R == C) ||
            (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
          return Op1;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
          return V;
        // Otherwise, create a new instruction.
        A = Builder->CreateBinOp(InnerOpcode, L, R);
        A->takeName(&I);
        return A;
      }
  }

  return nullptr;
}
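
// For example, factorization rewrites
//    %t0 = mul i32 %a, %b
//    %t1 = mul i32 %a, %c
//    %r  = add i32 %t0, %t1
// into
//    %s = add i32 %b, %c
//    %r = mul i32 %a, %s
// when %t0 and %t1 have no other uses, eliminating one multiplication.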

// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
// if the LHS is a constant zero (which is the 'negate' form).
//
Value *InstCombiner::dyn_castNegVal(Value *V) const {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  return nullptr;
}
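
// For example, given "%n = sub i32 0, %x", dyn_castNegVal(%n) returns %x;
// given the constant i32 5, it returns the folded constant i32 -5.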

// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {
  if (BinaryOperator::isFNeg(V, IgnoreZeroSign))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isFloatingPointTy())
      return ConstantExpr::getFNeg(C);

  return nullptr;
}

static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  if (CastInst *CI = dyn_cast<CastInst>(&I)) {
    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
  }

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I)) {
    Value *RI = IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                    SO->getName()+".op");
    Instruction *FPInst = dyn_cast<Instruction>(RI);
    if (FPInst && isa<FPMathOperator>(FPInst))
      FPInst->copyFastMathFlags(BO);
    return RI;
  }
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}

// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments.  This also works for Cast instructions, which obviously do
// not have a second operand.
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
  // Don't modify shared select instructions
  if (!SI->hasOneUse()) return nullptr;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType()->isIntegerTy(1)) return nullptr;

    // If it's a bitcast involving vectors, make sure it has the same number of
    // elements on both sides.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
      VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
      VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());

      // Verify that either both or neither are vectors.
      if ((SrcTy == nullptr) != (DestTy == nullptr)) return nullptr;
      // If vectors, verify that they have the same number of elements.
      if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
        return nullptr;
    }

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);

    return SelectInst::Create(SI->getCondition(),
                              SelectTrueVal, SelectFalseVal);
  }
  return nullptr;
}
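
// For example,
//    %s = select i1 %c, i32 10, i32 20
//    %r = add i32 %s, 5
// folds into
//    %r = select i1 %c, i32 15, i32 25
// when %s has no other uses.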


/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
/// has a PHI node as operand #0, see if we can fold the instruction into the
/// PHI (which is only possible if all operands to the PHI are constants).
///
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return nullptr;

  // We normally only transform phis with a single use.  However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of the
  // uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (User *U : PN->users()) {
      Instruction *UI = cast<Instruction>(U);
      if (UI != &I && !I.isIdenticalTo(UI))
        return nullptr;
    }
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = nullptr;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
      continue;

    if (isa<PHINode>(InVal)) return nullptr;  // Itself a phi.
    if (NonConstBB) return nullptr;  // More than one non-const value.

    NonConstBB = PN->getIncomingBlock(i);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
      if (II->getParent() == NonConstBB)
        return nullptr;

    // If the incoming non-constant value is in I's block, we will remove one
    // instruction, but insert another equivalent one, leading to infinite
    // instcombine.
    if (NonConstBB == I.getParent())
      return nullptr;
  }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB != nullptr) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return nullptr;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // If we are going to have to insert a new computation, do so right before the
  // predecessor's terminator.
  if (NonConstBB)
    Builder->SetInsertPoint(NonConstBB->getTerminator());

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a phi,
    // not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = nullptr;
      // Beware of ConstantExpr:  it may eventually evaluate to getNullValue,
      // even if currently isNullValue gives false.
      Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i));
      if (InC && !isa<ConstantExpr>(InC))
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
      else
        InV = Builder->CreateSelect(PN->getIncomingValue(i),
                                    TrueVInPred, FalseVInPred, "phitmp");
      NewPN->addIncoming(InV, ThisBB);
    }
  } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = nullptr;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
      else if (isa<ICmpInst>(CI))
        InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      else
        InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = nullptr;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::get(I.getOpcode(), InC, C);
      else
        InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                   PN->getIncomingValue(i), C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      else
        InV = Builder->CreateCast(CI->getOpcode(),
                                PN->getIncomingValue(i), I.getType(), "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }

  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    if (User == &I) continue;
    ReplaceInstUsesWith(*User, NewPN);
    EraseInstFromFunction(*User);
  }
  return ReplaceInstUsesWith(I, NewPN);
}
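
// For example, a phi of constants
//    %p = phi i32 [ 1, %bb0 ], [ 2, %bb1 ]
//    %r = add i32 %p, 10
// folds into
//    %r = phi i32 [ 11, %bb0 ], [ 12, %bb1 ]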

/// FindElementAtOffset - Given a pointer type and a constant offset, determine
/// whether or not there is a sequence of GEP indices into the pointed-to type
/// that will land us at the specified offset.  If so, fill them into
/// NewIndices and return the resultant element type, otherwise return null.
Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
                                        SmallVectorImpl<Value*> &NewIndices) {
  assert(PtrTy->isPtrOrPtrVectorTy());

  if (!DL)
    return nullptr;

  Type *Ty = PtrTy->getPointerElementType();
  if (!Ty->isSized())
    return nullptr;

  // Start with the index over the outer type.  Note that the type size
  // might be zero (even if the offset isn't zero) if the indexed type
  // is something like [0 x {int, int}]
  Type *IntPtrTy = DL->getIntPtrType(PtrTy);
  int64_t FirstIdx = 0;
  if (int64_t TySize = DL->getTypeAllocSize(Ty)) {
    FirstIdx = Offset/TySize;
    Offset -= FirstIdx*TySize;

    // Handle hosts where % returns negative instead of values [0..TySize).
    if (Offset < 0) {
      --FirstIdx;
      Offset += TySize;
      assert(Offset >= 0);
    }
    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
  }

  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

  // Index into the types.  If we fail, bail out by returning null.
  while (Offset) {
    // Indexing into tail padding between struct/array elements.
    if (uint64_t(Offset*8) >= DL->getTypeSizeInBits(Ty))
      return nullptr;

    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = DL->getStructLayout(STy);
      assert(Offset < (int64_t)SL->getSizeInBytes() &&
             "Offset must stay within the indexed type");

      unsigned Elt = SL->getElementContainingOffset(Offset);
      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                            Elt));

      Offset -= SL->getElementOffset(Elt);
      Ty = STy->getElementType(Elt);
    } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
      uint64_t EltSize = DL->getTypeAllocSize(AT->getElementType());
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
      Offset %= EltSize;
      Ty = AT->getElementType();
    } else {
      // Otherwise, we can't index into the middle of this atomic type, bail.
      return nullptr;
    }
  }

  return Ty;
}
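
// For example, given the pointed-to type { i32, [4 x i8] } and Offset = 6,
// the indices are [0, 1, 2]: the inner array starts at byte 4, so byte 6 is
// its third element, and the returned element type is i8.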

static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
  // If this GEP has only 0 indices, it is the same pointer as
  // Src. If Src is not a trivial GEP too, don't combine
  // the indices.
  if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
      !Src.hasOneUse())
    return false;
  return true;
}

/// Descale - Return a value X such that Val = X * Scale, or null if none.  If
/// the multiplication is known not to overflow then NoSignedWrap is set.
Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
  assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");
  assert(cast<IntegerType>(Val->getType())->getBitWidth() ==
         Scale.getBitWidth() && "Scale not compatible with value!");

  // If Val is zero or Scale is one then Val = Val * Scale.
  if (match(Val, m_Zero()) || Scale == 1) {
    NoSignedWrap = true;
    return Val;
  }

  // If Scale is zero then it does not divide Val.
  if (Scale.isMinValue())
    return nullptr;

  // Look through chains of multiplications, searching for a constant that is
  // divisible by Scale.  For example, descaling X*(Y*(Z*4)) by a factor of 4
  // will find the constant factor 4 and produce X*(Y*Z).  Descaling X*(Y*8) by
  // a factor of 4 will produce X*(Y*2).  The principle of operation is to bore
  // down from Val:
  //
  //     Val = M1 * X          ||   Analysis starts here and works down
  //      M1 = M2 * Y          ||   Doesn't descend into terms with more
  //      M2 =  Z * 4          \/   than one use
  //
  // Then to modify a term at the bottom:
  //
  //     Val = M1 * X
  //      M1 =  Z * Y          ||   Replaced M2 with Z
  //
  // Then to work back up correcting nsw flags.

  // Op - the term we are currently analyzing.  Starts at Val then drills down.
  // Replaced with its descaled value before exiting from the drill down loop.
  Value *Op = Val;

  // Parent - initially null, but after drilling down notes where Op came from.
  // In the example above, Parent is (Val, 0) when Op is M1, because M1 is the
  // 0'th operand of Val.
  std::pair<Instruction*, unsigned> Parent;

  // RequireNoSignedWrap - Set if the transform requires a descaling at deeper
  // levels that doesn't overflow.
  bool RequireNoSignedWrap = false;

  // logScale - log base 2 of the scale.  Negative if not a power of 2.
  int32_t logScale = Scale.exactLogBase2();

  for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down

    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
      // If Op is a constant divisible by Scale then descale to the quotient.
      APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth.
      APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
      if (!Remainder.isMinValue())
        // Not divisible by Scale.
        return nullptr;
      // Replace with the quotient in the parent.
      Op = ConstantInt::get(CI->getType(), Quotient);
      NoSignedWrap = true;
      break;
    }

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) {

      if (BO->getOpcode() == Instruction::Mul) {
        // Multiplication.
        NoSignedWrap = BO->hasNoSignedWrap();
        if (RequireNoSignedWrap && !NoSignedWrap)
          return nullptr;

        // There are three cases for multiplication: multiplication by exactly
        // the scale, multiplication by a constant different to the scale, and
        // multiplication by something else.
        Value *LHS = BO->getOperand(0);
        Value *RHS = BO->getOperand(1);

        if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
          // Multiplication by a constant.
          if (CI->getValue() == Scale) {
            // Multiplication by exactly the scale, replace the multiplication
            // by its left-hand side in the parent.
            Op = LHS;
            break;
          }

          // Otherwise drill down into the constant.
          if (!Op->hasOneUse())
            return nullptr;

          Parent = std::make_pair(BO, 1);
          continue;
        }

        // Multiplication by something else. Drill down into the left-hand side
        // since that's where the reassociate pass puts the good stuff.
        if (!Op->hasOneUse())
          return nullptr;

        Parent = std::make_pair(BO, 0);
        continue;
      }

      if (logScale > 0 && BO->getOpcode() == Instruction::Shl &&
          isa<ConstantInt>(BO->getOperand(1))) {
        // Multiplication by a power of 2.
        NoSignedWrap = BO->hasNoSignedWrap();
        if (RequireNoSignedWrap && !NoSignedWrap)
          return nullptr;

        Value *LHS = BO->getOperand(0);
        int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
          getLimitedValue(Scale.getBitWidth());
        // Op = LHS << Amt.

        if (Amt == logScale) {
          // Multiplication by exactly the scale, replace the multiplication
          // by its left-hand side in the parent.
          Op = LHS;
          break;
        }
        if (Amt < logScale || !Op->hasOneUse())
          return nullptr;

        // Multiplication by more than the scale.  Reduce the multiplying amount
        // by the scale in the parent.
        Parent = std::make_pair(BO, 1);
        Op = ConstantInt::get(BO->getType(), Amt - logScale);
        break;
      }
    }

    if (!Op->hasOneUse())
      return nullptr;

    if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
      if (Cast->getOpcode() == Instruction::SExt) {
        // Op is sign-extended from a smaller type, descale in the smaller type.
        unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
        APInt SmallScale = Scale.trunc(SmallSize);
        // Suppose Op = sext X, and we descale X as Y * SmallScale.  We want to
        // descale Op as (sext Y) * Scale.  In order to have
        //   sext (Y * SmallScale) = (sext Y) * Scale
        // some conditions need to hold however: SmallScale must sign-extend to
        // Scale and the multiplication Y * SmallScale should not overflow.
        if (SmallScale.sext(Scale.getBitWidth()) != Scale)
          // SmallScale does not sign-extend to Scale.
          return nullptr;
        assert(SmallScale.exactLogBase2() == logScale);
        // Require that Y * SmallScale must not overflow.
        RequireNoSignedWrap = true;

        // Drill down through the cast.
        Parent = std::make_pair(Cast, 0);
        Scale = SmallScale;
        continue;
      }

      if (Cast->getOpcode() == Instruction::Trunc) {
        // Op is truncated from a larger type, descale in the larger type.
        // Suppose Op = trunc X, and we descale X as Y * sext Scale.  Then
        //   trunc (Y * sext Scale) = (trunc Y) * Scale
        // always holds.  However (trunc Y) * Scale may overflow even if
        // trunc (Y * sext Scale) does not, so nsw flags need to be cleared
        // from this point up in the expression (see later).
        if (RequireNoSignedWrap)
          return nullptr;

        // Drill down through the cast.
        unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
        Parent = std::make_pair(Cast, 0);
        Scale = Scale.sext(LargeSize);
        if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits())
          logScale = -1;
        assert(Scale.exactLogBase2() == logScale);
        continue;
      }
    }

    // Unsupported expression, bail out.
    return nullptr;
  }

  // We know that we can successfully descale, so from here on we can safely
  // modify the IR.  Op holds the descaled version of the deepest term in the
  // expression.  NoSignedWrap is 'true' if multiplying Op by Scale is known
  // not to overflow.

  if (!Parent.first)
    // The expression only had one term.
    return Op;

  // Rewrite the parent using the descaled version of its operand.
  assert(Parent.first->hasOneUse() && "Drilled down when more than one use!");
  assert(Op != Parent.first->getOperand(Parent.second) &&
         "Descaling was a no-op?");
  Parent.first->setOperand(Parent.second, Op);
  Worklist.Add(Parent.first);

  // Now work back up the expression correcting nsw flags.  The logic is based
  // on the following observation: if X * Y is known not to overflow as a signed
  // multiplication, and Y is replaced by a value Z with smaller absolute value,
  // then X * Z will not overflow as a signed multiplication either.  As we work
  // our way up, having NoSignedWrap 'true' means that the descaled value at the
  // current level has strictly smaller absolute value than the original.
  Instruction *Ancestor = Parent.first;
  do {
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Ancestor)) {
      // If the multiplication wasn't nsw then we can't say anything about the
      // value of the descaled multiplication, and we have to clear nsw flags
      // from this point on up.
      bool OpNoSignedWrap = BO->hasNoSignedWrap();
      NoSignedWrap &= OpNoSignedWrap;
      if (NoSignedWrap != OpNoSignedWrap) {
        BO->setHasNoSignedWrap(NoSignedWrap);
        Worklist.Add(Ancestor);
      }
    } else if (Ancestor->getOpcode() == Instruction::Trunc) {
      // The fact that the descaled input to the trunc has smaller absolute
      // value than the original input doesn't tell us anything useful about
      // the absolute values of the truncations.
      NoSignedWrap = false;
    }
    assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) &&
           "Failed to keep proper track of nsw flags while drilling down?");

    if (Ancestor == Val)
      // Got to the top, all done!
      return Val;

    // Move up one level in the expression.
    assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
    Ancestor = Ancestor->user_back();
  } while (1);
}
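
// For example, descaling
//    %m = mul nsw i32 %y, 8
//    %v = mul nsw i32 %m, %x
// by a Scale of 4 rewrites %m into "mul nsw i32 %y, 2", so the rewritten %v
// computes a quarter of its original value; NoSignedWrap is set because
// every multiplication on the path was nsw.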

/// \brief Creates a binary-operation node with the same attributes as the
/// specified one, but with the given operands.
static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS,
                                 InstCombiner::BuilderTy *B) {
  Value *BORes = B->CreateBinOp(Inst.getOpcode(), LHS, RHS);
  if (BinaryOperator *NewBO = dyn_cast<BinaryOperator>(BORes)) {
    if (isa<OverflowingBinaryOperator>(NewBO)) {
      NewBO->setHasNoSignedWrap(Inst.hasNoSignedWrap());
      NewBO->setHasNoUnsignedWrap(Inst.hasNoUnsignedWrap());
    }
    if (isa<PossiblyExactOperator>(NewBO))
      NewBO->setIsExact(Inst.isExact());
  }
  return BORes;
}

/// \brief Performs transformations on a binary operator that are specific to
/// vector types.
/// \param Inst Binary operator to transform.
/// \return Pointer to the node that must replace the original binary operator,
///         or a null pointer if no transformation was made.
Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
  if (!Inst.getType()->isVectorTy()) return nullptr;

  unsigned VWidth = cast<VectorType>(Inst.getType())->getNumElements();
  Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
  assert(cast<VectorType>(LHS->getType())->getNumElements() == VWidth);
  assert(cast<VectorType>(RHS->getType())->getNumElements() == VWidth);

  // If both arguments of the binary operation are shuffles that use the same
  // mask and shuffle within a single vector, it is worthwhile to move the
  // shuffle after the binary operation:
  //   Op(shuffle(v1, m), shuffle(v2, m)) -> shuffle(Op(v1, v2), m)
  if (isa<ShuffleVectorInst>(LHS) && isa<ShuffleVectorInst>(RHS)) {
    ShuffleVectorInst *LShuf = cast<ShuffleVectorInst>(LHS);
    ShuffleVectorInst *RShuf = cast<ShuffleVectorInst>(RHS);
    if (isa<UndefValue>(LShuf->getOperand(1)) &&
        isa<UndefValue>(RShuf->getOperand(1)) &&
        LShuf->getOperand(0)->getType() == RShuf->getOperand(0)->getType() &&
        LShuf->getMask() == RShuf->getMask()) {
      Value *NewBO = CreateBinOpAsGiven(Inst, LShuf->getOperand(0),
          RShuf->getOperand(0), Builder);
      Value *Res = Builder->CreateShuffleVector(NewBO,
          UndefValue::get(NewBO->getType()), LShuf->getMask());
      return Res;
    }
  }

  // If one argument is a shuffle within one vector and the other is a constant,
  // try moving the shuffle after the binary operation.
  ShuffleVectorInst *Shuffle = nullptr;
  Constant *C1 = nullptr;
  if (isa<ShuffleVectorInst>(LHS)) Shuffle = cast<ShuffleVectorInst>(LHS);
  if (isa<ShuffleVectorInst>(RHS)) Shuffle = cast<ShuffleVectorInst>(RHS);
  if (isa<Constant>(LHS)) C1 = cast<Constant>(LHS);
  if (isa<Constant>(RHS)) C1 = cast<Constant>(RHS);
  if (Shuffle && C1 && isa<UndefValue>(Shuffle->getOperand(1)) &&
      Shuffle->getType() == Shuffle->getOperand(0)->getType()) {
    SmallVector<int, 16> ShMask = Shuffle->getShuffleMask();
    // Find a constant C2 with the property:
    //   shuffle(C2, ShMask) = C1
    // If no such constant exists (example: ShMask=<0,0> and C1=<1,2>), the
    // reordering is not possible.
    SmallVector<Constant*, 16> C2M(VWidth,
                               UndefValue::get(C1->getType()->getScalarType()));
    bool MayChange = true;
    for (unsigned I = 0; I < VWidth; ++I) {
      if (ShMask[I] >= 0) {
        assert(ShMask[I] < (int)VWidth);
        if (!isa<UndefValue>(C2M[ShMask[I]])) {
          MayChange = false;
          break;
        }
        C2M[ShMask[I]] = C1->getAggregateElement(I);
      }
    }
    if (MayChange) {
      Constant *C2 = ConstantVector::get(C2M);
      Value *NewLHS, *NewRHS;
      if (isa<Constant>(LHS)) {
        NewLHS = C2;
        NewRHS = Shuffle->getOperand(0);
      } else {
        NewLHS = Shuffle->getOperand(0);
        NewRHS = C2;
      }
      Value *NewBO = CreateBinOpAsGiven(Inst, NewLHS, NewRHS, Builder);
      Value *Res = Builder->CreateShuffleVector(NewBO,
          UndefValue::get(Inst.getType()), Shuffle->getMask());
      return Res;
    }
  }

  return nullptr;
}
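
// For example,
//    %a = shufflevector <4 x i32> %v1, <4 x i32> undef,
//                       <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//    %b = shufflevector <4 x i32> %v2, <4 x i32> undef,
//                       <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//    %r = add <4 x i32> %a, %b
// becomes
//    %s = add <4 x i32> %v1, %v2
//    %r = shufflevector <4 x i32> %s, <4 x i32> undef,
//                       <4 x i32> <i32 3, i32 2, i32 1, i32 0>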

Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());

  if (Value *V = SimplifyGEPInst(Ops, DL))
    return ReplaceInstUsesWith(GEP, V);

  Value *PtrOp = GEP.getOperand(0);

  // Eliminate unneeded casts for indices, and replace indices which displace
  // by multiples of a zero size type with zero.
  if (DL) {
    bool MadeChange = false;
    Type *IntPtrTy = DL->getIntPtrType(GEP.getPointerOperandType());

    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
         I != E; ++I, ++GTI) {
      // Skip indices into struct types.
      SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
      if (!SeqTy) continue;

      // If the element type has zero size then any index over it is equivalent
      // to an index of zero, so replace it with zero if it is not zero already.
      if (SeqTy->getElementType()->isSized() &&
          DL->getTypeAllocSize(SeqTy->getElementType()) == 0)
        if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
          *I = Constant::getNullValue(IntPtrTy);
          MadeChange = true;
        }

      Type *IndexTy = (*I)->getType();
      if (IndexTy != IntPtrTy) {
        // If we are using a wider index than needed for this platform, shrink
        // it to what we need.  If narrower, sign-extend it to what we need.
        // This explicit cast can make subsequent optimizations more obvious.
        *I = Builder->CreateIntCast(*I, IntPtrTy, true);
        MadeChange = true;
      }
    }
    if (MadeChange) return &GEP;
  }

  // Combine Indices - If the source pointer to this getelementptr instruction
  // is a getelementptr instruction, combine the indices of the two
  // getelementptr instructions into a single instruction.
  //
  if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
    if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
      return nullptr;

    // Note that if our source is a gep chain itself then we wait for that
    // chain to be resolved before we perform this transformation.  This
    // avoids us creating a TON of code in some cases.
    if (GEPOperator *SrcGEP =
          dyn_cast<GEPOperator>(Src->getOperand(0)))
      if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
        return nullptr;   // Wait until our source is folded to completion.

    SmallVector<Value*, 8> Indices;

    // Find out whether the last index in the source GEP is a sequential idx.
    bool EndsWithSequential = false;
    for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
         I != E; ++I)
      EndsWithSequential = !(*I)->isStructTy();

    // Can we combine the two pointer arithmetics offsets?
    if (EndsWithSequential) {
      // Replace: gep (gep %P, long B), long A, ...
      // With:    T = long A+B; gep %P, T, ...
      //
      Value *Sum;
      Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
      Value *GO1 = GEP.getOperand(1);
      if (SO1 == Constant::getNullValue(SO1->getType())) {
        Sum = GO1;
      } else if (GO1 == Constant::getNullValue(GO1->getType())) {
        Sum = SO1;
      } else {
        // If they aren't the same type, then the input hasn't been processed
        // by the loop above yet (which canonicalizes sequential index types to
        // intptr_t).  Just avoid transforming this until the input has been
        // normalized.
        if (SO1->getType() != GO1->getType())
          return nullptr;
        Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
      }

      // Update the GEP in place if possible.
      if (Src->getNumOperands() == 2) {
        GEP.setOperand(0, Src->getOperand(0));
        GEP.setOperand(1, Sum);
        return &GEP;
      }
      Indices.append(Src->op_begin()+1, Src->op_end()-1);
      Indices.push_back(Sum);
      Indices.append(GEP.op_begin()+2, GEP.op_end());
    } else if (isa<Constant>(*GEP.idx_begin()) &&
               cast<Constant>(*GEP.idx_begin())->isNullValue() &&
               Src->getNumOperands() != 1) {
      // Otherwise we can do the fold if the first index of the GEP is a zero
      Indices.append(Src->op_begin()+1, Src->op_end());
      Indices.append(GEP.idx_begin()+1, GEP.idx_end());
    }

    if (!Indices.empty())
      return (GEP.isInBounds() && Src->isInBounds()) ?
        GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices,
                                          GEP.getName()) :
        GetElementPtrInst::Create(Src->getOperand(0), Indices, GEP.getName());
  }

  // Canonicalize (gep i8* X, -(ptrtoint Y)) to (sub (ptrtoint X), (ptrtoint Y))
  // The GEP pattern is emitted by the SCEV expander for certain kinds of
  // pointer arithmetic.
  if (DL && GEP.getNumIndices() == 1 &&
      match(GEP.getOperand(1), m_Neg(m_PtrToInt(m_Value())))) {
    unsigned AS = GEP.getPointerAddressSpace();
    if (GEP.getType() == Builder->getInt8PtrTy(AS) &&
        GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
        DL->getPointerSizeInBits(AS)) {
      Operator *Index = cast<Operator>(GEP.getOperand(1));
      Value *PtrToInt = Builder->CreatePtrToInt(PtrOp, Index->getType());
      Value *NewSub = Builder->CreateSub(PtrToInt, Index->getOperand(1));
      return CastInst::Create(Instruction::IntToPtr, NewSub, GEP.getType());
    }
  }

  // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
  Value *StrippedPtr = PtrOp->stripPointerCasts();
  PointerType *StrippedPtrTy = dyn_cast<PointerType>(StrippedPtr->getType());

  // We do not handle pointer-vector geps here.
  if (!StrippedPtrTy)
    return nullptr;

  if (StrippedPtr != PtrOp) {
    bool HasZeroPointerIndex = false;
    if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
      HasZeroPointerIndex = C->isZero();

    // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
    // into     : GEP [10 x i8]* X, i32 0, ...
    //
    // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
    //           into     : GEP i8* X, ...
    //
    // This occurs when the program declares an array extern like "int X[];"
    if (HasZeroPointerIndex) {
      PointerType *CPTy = cast<PointerType>(PtrOp->getType());
      if (ArrayType *CATy =
          dyn_cast<ArrayType>(CPTy->getElementType())) {
        // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
        if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
          // -> GEP i8* X, ...
          SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
          GetElementPtrInst *Res =
            GetElementPtrInst::Create(StrippedPtr, Idx, GEP.getName());
          Res->setIsInBounds(GEP.isInBounds());
          if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace())
            return Res;
          // Insert Res, and create an addrspacecast.
          // e.g.,
          // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ...
          // ->
          // %0 = GEP i8 addrspace(1)* X, ...
          // addrspacecast i8 addrspace(1)* %0 to i8*
          return new AddrSpaceCastInst(Builder->Insert(Res), GEP.getType());
        }

        if (ArrayType *XATy =
              dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
          // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
          if (CATy->getElementType() == XATy->getElementType()) {
            // -> GEP [10 x i8]* X, i32 0, ...
            // At this point, we know that the cast source type is a pointer
            // to an array of the same type as the destination pointer
            // array.  Because the array type is never stepped over (there
            // is a leading zero) we can fold the cast into this GEP.
            if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) {
              GEP.setOperand(0, StrippedPtr);
              return &GEP;
            }
            // Cannot replace the base pointer directly because StrippedPtr's
            // address space is different. Instead, create a new GEP followed by
            // an addrspacecast.
            // e.g.,
            // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*),
            //   i32 0, ...
            // ->
            // %0 = GEP [10 x i8] addrspace(1)* X, ...
            // addrspacecast i8 addrspace(1)* %0 to i8*
            SmallVector<Value*, 8> Idx(GEP.idx_begin(), GEP.idx_end());
            Value *NewGEP = GEP.isInBounds() ?
              Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
              Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
            return new AddrSpaceCastInst(NewGEP, GEP.getType());
          }
        }
      }
    } else if (GEP.getNumOperands() == 2) {
      // Transform things like:
      // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
      // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
      Type *SrcElTy = StrippedPtrTy->getElementType();
      Type *ResElTy = PtrOp->getType()->getPointerElementType();
      if (DL && SrcElTy->isArrayTy() &&
          DL->getTypeAllocSize(SrcElTy->getArrayElementType()) ==
          DL->getTypeAllocSize(ResElTy)) {
        Type *IdxType = DL->getIntPtrType(GEP.getType());
        Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
        Value *NewGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
          Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());

        // V and GEP are both pointer types --> BitCast
        if (StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace())
          return new BitCastInst(NewGEP, GEP.getType());
        return new AddrSpaceCastInst(NewGEP, GEP.getType());
      }

      // Transform things like:
      // %V = mul i64 %N, 4
      // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
      // into:  %t1 = getelementptr i32* %arr, i32 %N; bitcast
1406      if (DL && ResElTy->isSized() && SrcElTy->isSized()) {
1407        // Check that changing the type amounts to dividing the index by a scale
1408        // factor.
1409        uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
1410        uint64_t SrcSize = DL->getTypeAllocSize(SrcElTy);
1411        if (ResSize && SrcSize % ResSize == 0) {
1412          Value *Idx = GEP.getOperand(1);
1413          unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
1414          uint64_t Scale = SrcSize / ResSize;
1415
1416          // Earlier transforms ensure that the index has type IntPtrType, which
1417          // considerably simplifies the logic by eliminating implicit casts.
1418          assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
1419                 "Index not cast to pointer width?");
1420
1421          bool NSW;
1422          if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
1423            // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
1424            // If the multiplication NewIdx * Scale may overflow then the new
1425            // GEP may not be "inbounds".
1426            Value *NewGEP = GEP.isInBounds() && NSW ?
1427              Builder->CreateInBoundsGEP(StrippedPtr, NewIdx, GEP.getName()) :
1428              Builder->CreateGEP(StrippedPtr, NewIdx, GEP.getName());
1429
1430            // The NewGEP must be pointer typed, so must the old one -> BitCast
1431            if (StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace())
1432              return new BitCastInst(NewGEP, GEP.getType());
1433            return new AddrSpaceCastInst(NewGEP, GEP.getType());
1434          }
1435        }
1436      }
1437
1438      // Similarly, transform things like:
1439      // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
1440      //   (where tmp = 8*tmp2) into:
1441      // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
1442      if (DL && ResElTy->isSized() && SrcElTy->isSized() &&
1443          SrcElTy->isArrayTy()) {
1444        // Check that changing to the array element type amounts to dividing the
1445        // index by a scale factor.
1446        uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
1447        uint64_t ArrayEltSize
1448          = DL->getTypeAllocSize(SrcElTy->getArrayElementType());
1449        if (ResSize && ArrayEltSize % ResSize == 0) {
1450          Value *Idx = GEP.getOperand(1);
1451          unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
1452          uint64_t Scale = ArrayEltSize / ResSize;
1453
1454          // Earlier transforms ensure that the index has type IntPtrType, which
1455          // considerably simplifies the logic by eliminating implicit casts.
1456          assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
1457                 "Index not cast to pointer width?");
1458
1459          bool NSW;
1460          if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
1461            // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
1462            // If the multiplication NewIdx * Scale may overflow then the new
1463            // GEP may not be "inbounds".
1464            Value *Off[2] = {
1465              Constant::getNullValue(DL->getIntPtrType(GEP.getType())),
1466              NewIdx
1467            };
1468
1469            Value *NewGEP = GEP.isInBounds() && NSW ?
1470              Builder->CreateInBoundsGEP(StrippedPtr, Off, GEP.getName()) :
1471              Builder->CreateGEP(StrippedPtr, Off, GEP.getName());
1472            // The NewGEP must be pointer typed, so must the old one -> BitCast
1473            if (StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace())
1474              return new BitCastInst(NewGEP, GEP.getType());
1475            return new AddrSpaceCastInst(NewGEP, GEP.getType());
1476          }
1477        }
1478      }
1479    }
1480  }
1481
1482  if (!DL)
1483    return nullptr;
1484
1485  /// See if we can simplify:
1486  ///   X = bitcast A* to B*
1487  ///   Y = gep X, <...constant indices...>
1488  /// into a gep of the original struct.  This is important for SROA and alias
1489  /// analysis of unions.  If "A" is also a bitcast, wait for A/X to be merged.
1490  if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
1491    Value *Operand = BCI->getOperand(0);
1492    PointerType *OpType = cast<PointerType>(Operand->getType());
1493    unsigned OffsetBits = DL->getPointerTypeSizeInBits(OpType);
1494    APInt Offset(OffsetBits, 0);
1495    if (!isa<BitCastInst>(Operand) &&
1496        GEP.accumulateConstantOffset(*DL, Offset) &&
1497        StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {
1498
1499      // If this GEP instruction doesn't move the pointer, just replace the GEP
1500      // with a bitcast of the real input to the dest type.
1501      if (!Offset) {
1502        // If the bitcast is of an allocation, and the allocation will be
1503        // converted to match the type of the cast, don't touch this.
1504        if (isa<AllocaInst>(Operand) || isAllocationFn(Operand, TLI)) {
          // See if the bitcast simplifies; if so, don't nuke this GEP yet.
1506          if (Instruction *I = visitBitCast(*BCI)) {
1507            if (I != BCI) {
1508              I->takeName(BCI);
1509              BCI->getParent()->getInstList().insert(BCI, I);
1510              ReplaceInstUsesWith(*BCI, I);
1511            }
1512            return &GEP;
1513          }
1514        }
1515        return new BitCastInst(Operand, GEP.getType());
1516      }
1517
1518      // Otherwise, if the offset is non-zero, we need to find out if there is a
1519      // field at Offset in 'A's type.  If so, we can pull the cast through the
1520      // GEP.
1521      SmallVector<Value*, 8> NewIndices;
1522      if (FindElementAtOffset(OpType, Offset.getSExtValue(), NewIndices)) {
1523        Value *NGEP = GEP.isInBounds() ?
1524          Builder->CreateInBoundsGEP(Operand, NewIndices) :
1525          Builder->CreateGEP(Operand, NewIndices);
1526
1527        if (NGEP->getType() == GEP.getType())
1528          return ReplaceInstUsesWith(GEP, NGEP);
1529        NGEP->takeName(&GEP);
1530        return new BitCastInst(NGEP, GEP.getType());
1531      }
1532    }
1533  }
1534
1535  return nullptr;
1536}
1537
1538static bool
1539isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users,
1540                     const TargetLibraryInfo *TLI) {
1541  SmallVector<Instruction*, 4> Worklist;
1542  Worklist.push_back(AI);
1543
1544  do {
1545    Instruction *PI = Worklist.pop_back_val();
1546    for (User *U : PI->users()) {
1547      Instruction *I = cast<Instruction>(U);
1548      switch (I->getOpcode()) {
1549      default:
1550        // Give up the moment we see something we can't handle.
1551        return false;
1552
1553      case Instruction::BitCast:
1554      case Instruction::GetElementPtr:
1555        Users.push_back(I);
1556        Worklist.push_back(I);
1557        continue;
1558
1559      case Instruction::ICmp: {
1560        ICmpInst *ICI = cast<ICmpInst>(I);
1561        // We can fold eq/ne comparisons with null to false/true, respectively.
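        // e.g., once the allocation is removed, 'icmp eq i8* %m, null' folds
        // to 'false' and 'icmp ne i8* %m, null' folds to 'true'.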
1562        if (!ICI->isEquality() || !isa<ConstantPointerNull>(ICI->getOperand(1)))
1563          return false;
1564        Users.push_back(I);
1565        continue;
1566      }
1567
1568      case Instruction::Call:
1569        // Ignore no-op and store intrinsics.
1570        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1571          switch (II->getIntrinsicID()) {
1572          default:
1573            return false;
1574
1575          case Intrinsic::memmove:
1576          case Intrinsic::memcpy:
1577          case Intrinsic::memset: {
1578            MemIntrinsic *MI = cast<MemIntrinsic>(II);
1579            if (MI->isVolatile() || MI->getRawDest() != PI)
1580              return false;
1581          }
1582          // fall through
1583          case Intrinsic::dbg_declare:
1584          case Intrinsic::dbg_value:
1585          case Intrinsic::invariant_start:
1586          case Intrinsic::invariant_end:
1587          case Intrinsic::lifetime_start:
1588          case Intrinsic::lifetime_end:
1589          case Intrinsic::objectsize:
1590            Users.push_back(I);
1591            continue;
1592          }
1593        }
1594
1595        if (isFreeCall(I, TLI)) {
1596          Users.push_back(I);
1597          continue;
1598        }
1599        return false;
1600
1601      case Instruction::Store: {
1602        StoreInst *SI = cast<StoreInst>(I);
1603        if (SI->isVolatile() || SI->getPointerOperand() != PI)
1604          return false;
1605        Users.push_back(I);
1606        continue;
1607      }
1608      }
1609      llvm_unreachable("missing a return?");
1610    }
1611  } while (!Worklist.empty());
1612  return true;
1613}
1614
1615Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
  // If we have a malloc call whose only uses are comparisons to null and free
  // calls, delete the calls and replace the comparisons with true or false as
  // appropriate.
1619  SmallVector<WeakVH, 64> Users;
1620  if (isAllocSiteRemovable(&MI, Users, TLI)) {
1621    for (unsigned i = 0, e = Users.size(); i != e; ++i) {
1622      Instruction *I = cast_or_null<Instruction>(&*Users[i]);
1623      if (!I) continue;
1624
1625      if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
1626        ReplaceInstUsesWith(*C,
1627                            ConstantInt::get(Type::getInt1Ty(C->getContext()),
1628                                             C->isFalseWhenEqual()));
1629      } else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
1630        ReplaceInstUsesWith(*I, UndefValue::get(I->getType()));
1631      } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1632        if (II->getIntrinsicID() == Intrinsic::objectsize) {
1633          ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
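          // The i1 'min' argument of @llvm.objectsize selects what is
          // returned when the size is unknown: false -> -1 (all ones),
          // true -> 0.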
1634          uint64_t DontKnow = CI->isZero() ? -1ULL : 0;
1635          ReplaceInstUsesWith(*I, ConstantInt::get(I->getType(), DontKnow));
1636        }
1637      }
1638      EraseInstFromFunction(*I);
1639    }
1640
1641    if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
1642      // Replace invoke with a NOP intrinsic to maintain the original CFG
1643      Module *M = II->getParent()->getParent()->getParent();
1644      Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
1645      InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
1646                         None, "", II->getParent());
1647    }
1648    return EraseInstFromFunction(MI);
1649  }
1650  return nullptr;
1651}
1652
1653/// \brief Move the call to free before a NULL test.
1654///
/// Check if this call to free is reached only after its argument has been
/// tested against NULL (property 0).
/// If yes, it is legal to move this call into its predecessor block.
1658///
1659/// The move is performed only if the block containing the call to free
1660/// will be removed, i.e.:
1661/// 1. it has only one predecessor P, and P has two successors
1662/// 2. it contains the call and an unconditional branch
1663/// 3. its successor is the same as its predecessor's successor
1664///
/// Profitability is not a concern here; this function should be called only
/// if the caller knows this transformation would be profitable (e.g., for
/// code size).
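///
/// Illustrative shape (hypothetical IR):
///   entry:
///     %isnull = icmp eq i8* %ptr, null
///     br i1 %isnull, label %cont, label %free.bb
///   free.bb:
///     call void @free(i8* %ptr)
///     br label %cont
/// The call is hoisted into 'entry' before the branch; 'free.bb' then becomes
/// an empty forwarding block that SimplifyCFG can remove (free(null) is a
/// no-op, so calling free unconditionally is safe).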
1668static Instruction *
1669tryToMoveFreeBeforeNullTest(CallInst &FI) {
1670  Value *Op = FI.getArgOperand(0);
1671  BasicBlock *FreeInstrBB = FI.getParent();
1672  BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
1673
1674  // Validate part of constraint #1: Only one predecessor
  // FIXME: We could handle multiple predecessors, but in that case we would
  //        have to duplicate the call to free in each predecessor, and that
  //        may not be profitable even for code size.
1678  if (!PredBB)
1679    return nullptr;
1680
  // Validate constraint #2: Does this block contain only the call to
1682  //                         free and an unconditional branch?
1683  // FIXME: We could check if we can speculate everything in the
1684  //        predecessor block
1685  if (FreeInstrBB->size() != 2)
1686    return nullptr;
1687  BasicBlock *SuccBB;
1688  if (!match(FreeInstrBB->getTerminator(), m_UnconditionalBr(SuccBB)))
1689    return nullptr;
1690
1691  // Validate the rest of constraint #1 by matching on the pred branch.
1692  TerminatorInst *TI = PredBB->getTerminator();
1693  BasicBlock *TrueBB, *FalseBB;
1694  ICmpInst::Predicate Pred;
1695  if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Op), m_Zero()), TrueBB, FalseBB)))
1696    return nullptr;
1697  if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
1698    return nullptr;
1699
1700  // Validate constraint #3: Ensure the null case just falls through.
1701  if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
1702    return nullptr;
1703  assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
1704         "Broken CFG: missing edge from predecessor to successor");
1705
1706  FI.moveBefore(TI);
1707  return &FI;
1708}
1709
1710
1711Instruction *InstCombiner::visitFree(CallInst &FI) {
1712  Value *Op = FI.getArgOperand(0);
1713
1714  // free undef -> unreachable.
1715  if (isa<UndefValue>(Op)) {
    // Insert a store to an undef pointer, because we cannot modify the CFG
    // here; SimplifyCFG will turn the undefined store into 'unreachable'.
1717    Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
1718                         UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
1719    return EraseInstFromFunction(FI);
1720  }
1721
  // If we have 'free null', delete the instruction.  This can happen in STL
  // code when lots of inlining happens.
1724  if (isa<ConstantPointerNull>(Op))
1725    return EraseInstFromFunction(FI);
1726
  // If we are optimizing for code size, try to move the call to free before
  // the null test so that SimplifyCFG can remove the empty block and dead
  // code elimination can remove the branch.  I.e., this helps to turn:
1730  // if (foo) free(foo);
1731  // into
1732  // free(foo);
1733  if (MinimizeSize)
1734    if (Instruction *I = tryToMoveFreeBeforeNullTest(FI))
1735      return I;
1736
1737  return nullptr;
1738}
1739
1740
1741
1742Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
1743  // Change br (not X), label True, label False to: br X, label False, True
1744  Value *X = nullptr;
1745  BasicBlock *TrueDest;
1746  BasicBlock *FalseDest;
1747  if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
1748      !isa<Constant>(X)) {
1749    // Swap Destinations and condition...
1750    BI.setCondition(X);
1751    BI.swapSuccessors();
1752    return &BI;
1753  }
1754
  // Canonicalize fcmp_one -> fcmp_ueq (and fcmp_ole/oge -> fcmp_ugt/ult) by
  // inverting the predicate and swapping the successors.
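  // e.g., br (fcmp one %x, %y), %T, %F  -->  br (fcmp ueq %x, %y), %F, %T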
1756  FCmpInst::Predicate FPred; Value *Y;
1757  if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
1758                             TrueDest, FalseDest)) &&
1759      BI.getCondition()->hasOneUse())
1760    if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
1761        FPred == FCmpInst::FCMP_OGE) {
1762      FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
1763      Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
1764
1765      // Swap Destinations and condition.
1766      BI.swapSuccessors();
1767      Worklist.Add(Cond);
1768      return &BI;
1769    }
1770
1771  // Canonicalize icmp_ne -> icmp_eq
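  // e.g., br (icmp ne %x, %y), %T, %F  -->  br (icmp eq %x, %y), %F, %T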
1772  ICmpInst::Predicate IPred;
1773  if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
1774                      TrueDest, FalseDest)) &&
1775      BI.getCondition()->hasOneUse())
1776    if (IPred == ICmpInst::ICMP_NE  || IPred == ICmpInst::ICMP_ULE ||
1777        IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
1778        IPred == ICmpInst::ICMP_SGE) {
1779      ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
1780      Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
1781      // Swap Destinations and condition.
1782      BI.swapSuccessors();
1783      Worklist.Add(Cond);
1784      return &BI;
1785    }
1786
1787  return nullptr;
1788}
1789
1790Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
1791  Value *Cond = SI.getCondition();
1792  if (Instruction *I = dyn_cast<Instruction>(Cond)) {
1793    if (I->getOpcode() == Instruction::Add)
1794      if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
1795        // change 'switch (X+4) case 1:' into 'switch (X) case -3'
        // Note that the case iterators do not include the default case.
1797        for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
1798             i != e; ++i) {
1799          ConstantInt* CaseVal = i.getCaseValue();
1800          Constant* NewCaseVal = ConstantExpr::getSub(cast<Constant>(CaseVal),
1801                                                      AddRHS);
1802          assert(isa<ConstantInt>(NewCaseVal) &&
1803                 "Result of expression should be constant");
1804          i.setValue(cast<ConstantInt>(NewCaseVal));
1805        }
1806        SI.setCondition(I->getOperand(0));
1807        Worklist.Add(I);
1808        return &SI;
1809      }
1810  }
1811  return nullptr;
1812}
1813
1814Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
1815  Value *Agg = EV.getAggregateOperand();
1816
1817  if (!EV.hasIndices())
1818    return ReplaceInstUsesWith(EV, Agg);
1819
1820  if (Constant *C = dyn_cast<Constant>(Agg)) {
1821    if (Constant *C2 = C->getAggregateElement(*EV.idx_begin())) {
      // With a single index, the extract fully resolves to C2.  Note that
      // an extractvalue requires at least one index, so we must not create
      // one with an empty index list below.
      if (EV.getNumIndices() == 1)
        return ReplaceInstUsesWith(EV, C2);
1824      // Extract the remaining indices out of the constant indexed by the
1825      // first index
1826      return ExtractValueInst::Create(C2, EV.getIndices().slice(1));
1827    }
1828    return nullptr; // Can't handle other constants
1829  }
1830
1831  if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
1832    // We're extracting from an insertvalue instruction, compare the indices
1833    const unsigned *exti, *exte, *insi, *inse;
1834    for (exti = EV.idx_begin(), insi = IV->idx_begin(),
1835         exte = EV.idx_end(), inse = IV->idx_end();
1836         exti != exte && insi != inse;
1837         ++exti, ++insi) {
1838      if (*insi != *exti)
        // The insert and extract reference different elements.
1840        // This means the extract is not influenced by the insert, and we can
1841        // replace the aggregate operand of the extract with the aggregate
1842        // operand of the insert. i.e., replace
1843        // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
1844        // %E = extractvalue { i32, { i32 } } %I, 0
1845        // with
1846        // %E = extractvalue { i32, { i32 } } %A, 0
1847        return ExtractValueInst::Create(IV->getAggregateOperand(),
1848                                        EV.getIndices());
1849    }
1850    if (exti == exte && insi == inse)
1851      // Both iterators are at the end: Index lists are identical. Replace
1852      // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
1853      // %C = extractvalue { i32, { i32 } } %B, 1, 0
1854      // with "i32 42"
1855      return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
1856    if (exti == exte) {
1857      // The extract list is a prefix of the insert list. i.e. replace
1858      // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
1859      // %E = extractvalue { i32, { i32 } } %I, 1
1860      // with
1861      // %X = extractvalue { i32, { i32 } } %A, 1
1862      // %E = insertvalue { i32 } %X, i32 42, 0
1863      // by switching the order of the insert and extract (though the
1864      // insertvalue should be left in, since it may have other uses).
1865      Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
1866                                                 EV.getIndices());
1867      return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
1868                                     makeArrayRef(insi, inse));
1869    }
1870    if (insi == inse)
1871      // The insert list is a prefix of the extract list
1872      // We can simply remove the common indices from the extract and make it
1873      // operate on the inserted value instead of the insertvalue result.
1874      // i.e., replace
1875      // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
1876      // %E = extractvalue { i32, { i32 } } %I, 1, 0
1877      // with
      // %E = extractvalue { i32 } { i32 42 }, 0
1879      return ExtractValueInst::Create(IV->getInsertedValueOperand(),
1880                                      makeArrayRef(exti, exte));
1881  }
1882  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
1883    // We're extracting from an intrinsic, see if we're the only user, which
1884    // allows us to simplify multiple result intrinsics to simpler things that
1885    // just get one value.
1886    if (II->hasOneUse()) {
1887      // Check if we're grabbing the overflow bit or the result of a 'with
1888      // overflow' intrinsic.  If it's the latter we can remove the intrinsic
1889      // and replace it with a traditional binary instruction.
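      // e.g., %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
      //       %v = extractvalue { i32, i1 } %s, 0
      // becomes (when the intrinsic has no other users):
      //       %v = add i32 %a, %b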
1890      switch (II->getIntrinsicID()) {
1891      case Intrinsic::uadd_with_overflow:
1892      case Intrinsic::sadd_with_overflow:
1893        if (*EV.idx_begin() == 0) {  // Normal result.
1894          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
1895          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
1896          EraseInstFromFunction(*II);
1897          return BinaryOperator::CreateAdd(LHS, RHS);
1898        }
1899
1900        // If the normal result of the add is dead, and the RHS is a constant,
1901        // we can transform this into a range comparison.
1902        // overflow = uadd a, -4  -->  overflow = icmp ugt a, 3
1903        if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
1904          if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
1905            return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
1906                                ConstantExpr::getNot(CI));
1907        break;
1908      case Intrinsic::usub_with_overflow:
1909      case Intrinsic::ssub_with_overflow:
1910        if (*EV.idx_begin() == 0) {  // Normal result.
1911          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
1912          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
1913          EraseInstFromFunction(*II);
1914          return BinaryOperator::CreateSub(LHS, RHS);
1915        }
1916        break;
1917      case Intrinsic::umul_with_overflow:
1918      case Intrinsic::smul_with_overflow:
1919        if (*EV.idx_begin() == 0) {  // Normal result.
1920          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
1921          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
1922          EraseInstFromFunction(*II);
1923          return BinaryOperator::CreateMul(LHS, RHS);
1924        }
1925        break;
1926      default:
1927        break;
1928      }
1929    }
1930  }
1931  if (LoadInst *L = dyn_cast<LoadInst>(Agg))
1932    // If the (non-volatile) load only has one use, we can rewrite this to a
1933    // load from a GEP. This reduces the size of the load.
1934    // FIXME: If a load is used only by extractvalue instructions then this
1935    //        could be done regardless of having multiple uses.
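    // e.g., %agg = load { i32, i8 }* %p
    //       %elt = extractvalue { i32, i8 } %agg, 1
    // becomes:
    //       %gep = getelementptr inbounds { i32, i8 }* %p, i32 0, i32 1
    //       %elt = load i8* %gep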
1936    if (L->isSimple() && L->hasOneUse()) {
1937      // extractvalue has integer indices, getelementptr has Value*s. Convert.
1938      SmallVector<Value*, 4> Indices;
1939      // Prefix an i32 0 since we need the first element.
1940      Indices.push_back(Builder->getInt32(0));
1941      for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
1942            I != E; ++I)
1943        Indices.push_back(Builder->getInt32(*I));
1944
1945      // We need to insert these at the location of the old load, not at that of
1946      // the extractvalue.
1947      Builder->SetInsertPoint(L->getParent(), L);
1948      Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(), Indices);
1949      // Returning the load directly will cause the main loop to insert it in
1950      // the wrong spot, so use ReplaceInstUsesWith().
1951      return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP));
1952    }
  // We could simplify extracts from other values. Note that nested extracts
  // may already be simplified implicitly by the above: extract(extract(insert))
  // will be translated into extract(insert(extract)) first and then just the
  // value inserted, if appropriate. Similarly for extracts from single-use
  // loads: extract(extract(load)) will be translated to extract(load(gep)),
  // and if again single-use then via load(gep(gep)) to load(gep).
1959  // However, double extracts from e.g. function arguments or return values
1960  // aren't handled yet.
1961  return nullptr;
1962}
1963
1964enum Personality_Type {
1965  Unknown_Personality,
1966  GNU_Ada_Personality,
1967  GNU_CXX_Personality,
1968  GNU_ObjC_Personality
1969};
1970
1971/// RecognizePersonality - See if the given exception handling personality
1972/// function is one that we understand.  If so, return a description of it;
1973/// otherwise return Unknown_Personality.
1974static Personality_Type RecognizePersonality(Value *Pers) {
1975  Function *F = dyn_cast<Function>(Pers->stripPointerCasts());
1976  if (!F)
1977    return Unknown_Personality;
1978  return StringSwitch<Personality_Type>(F->getName())
1979    .Case("__gnat_eh_personality", GNU_Ada_Personality)
1980    .Case("__gxx_personality_v0",  GNU_CXX_Personality)
1981    .Case("__objc_personality_v0", GNU_ObjC_Personality)
1982    .Default(Unknown_Personality);
1983}
1984
1985/// isCatchAll - Return 'true' if the given typeinfo will match anything.
1986static bool isCatchAll(Personality_Type Personality, Constant *TypeInfo) {
1987  switch (Personality) {
1988  case Unknown_Personality:
1989    return false;
1990  case GNU_Ada_Personality:
1991    // While __gnat_all_others_value will match any Ada exception, it doesn't
1992    // match foreign exceptions (or didn't, before gcc-4.7).
1993    return false;
1994  case GNU_CXX_Personality:
1995  case GNU_ObjC_Personality:
1996    return TypeInfo->isNullValue();
1997  }
1998  llvm_unreachable("Unknown personality!");
1999}
2000
2001static bool shorter_filter(const Value *LHS, const Value *RHS) {
  return cast<ArrayType>(LHS->getType())->getNumElements() <
         cast<ArrayType>(RHS->getType())->getNumElements();
}
2007
2008Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
2009  // The logic here should be correct for any real-world personality function.
2010  // However if that turns out not to be true, the offending logic can always
2011  // be conditioned on the personality function, like the catch-all logic is.
2012  Personality_Type Personality = RecognizePersonality(LI.getPersonalityFn());
2013
  // Simplify the list of clauses, e.g. by removing repeated catch clauses
2015  // (these are often created by inlining).
2016  bool MakeNewInstruction = false; // If true, recreate using the following:
2017  SmallVector<Value *, 16> NewClauses; // - Clauses for the new instruction;
2018  bool CleanupFlag = LI.isCleanup();   // - The new instruction is a cleanup.
2019
2020  SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
2021  for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
2022    bool isLastClause = i + 1 == e;
2023    if (LI.isCatch(i)) {
2024      // A catch clause.
2025      Value *CatchClause = LI.getClause(i);
2026      Constant *TypeInfo = cast<Constant>(CatchClause->stripPointerCasts());
2027
2028      // If we already saw this clause, there is no point in having a second
2029      // copy of it.
2030      if (AlreadyCaught.insert(TypeInfo)) {
2031        // This catch clause was not already seen.
2032        NewClauses.push_back(CatchClause);
2033      } else {
2034        // Repeated catch clause - drop the redundant copy.
2035        MakeNewInstruction = true;
2036      }
2037
2038      // If this is a catch-all then there is no point in keeping any following
2039      // clauses or marking the landingpad as having a cleanup.
2040      if (isCatchAll(Personality, TypeInfo)) {
2041        if (!isLastClause)
2042          MakeNewInstruction = true;
2043        CleanupFlag = false;
2044        break;
2045      }
2046    } else {
2047      // A filter clause.  If any of the filter elements were already caught
2048      // then they can be dropped from the filter.  It is tempting to try to
2049      // exploit the filter further by saying that any typeinfo that does not
2050      // occur in the filter can't be caught later (and thus can be dropped).
2051      // However this would be wrong, since typeinfos can match without being
2052      // equal (for example if one represents a C++ class, and the other some
2053      // class derived from it).
2054      assert(LI.isFilter(i) && "Unsupported landingpad clause!");
2055      Value *FilterClause = LI.getClause(i);
2056      ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
2057      unsigned NumTypeInfos = FilterType->getNumElements();
2058
2059      // An empty filter catches everything, so there is no point in keeping any
2060      // following clauses or marking the landingpad as having a cleanup.  By
2061      // dealing with this case here the following code is made a bit simpler.
2062      if (!NumTypeInfos) {
2063        NewClauses.push_back(FilterClause);
2064        if (!isLastClause)
2065          MakeNewInstruction = true;
2066        CleanupFlag = false;
2067        break;
2068      }
2069
2070      bool MakeNewFilter = false; // If true, make a new filter.
2071      SmallVector<Constant *, 16> NewFilterElts; // New elements.
2072      if (isa<ConstantAggregateZero>(FilterClause)) {
2073        // Not an empty filter - it contains at least one null typeinfo.
2074        assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
2075        Constant *TypeInfo =
2076          Constant::getNullValue(FilterType->getElementType());
2077        // If this typeinfo is a catch-all then the filter can never match.
2078        if (isCatchAll(Personality, TypeInfo)) {
2079          // Throw the filter away.
2080          MakeNewInstruction = true;
2081          continue;
2082        }
2083
2084        // There is no point in having multiple copies of this typeinfo, so
2085        // discard all but the first copy if there is more than one.
2086        NewFilterElts.push_back(TypeInfo);
2087        if (NumTypeInfos > 1)
2088          MakeNewFilter = true;
2089      } else {
2090        ConstantArray *Filter = cast<ConstantArray>(FilterClause);
2091        SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
2092        NewFilterElts.reserve(NumTypeInfos);
2093
2094        // Remove any filter elements that were already caught or that already
2095        // occurred in the filter.  While there, see if any of the elements are
2096        // catch-alls.  If so, the filter can be discarded.
2097        bool SawCatchAll = false;
2098        for (unsigned j = 0; j != NumTypeInfos; ++j) {
2099          Value *Elt = Filter->getOperand(j);
2100          Constant *TypeInfo = cast<Constant>(Elt->stripPointerCasts());
2101          if (isCatchAll(Personality, TypeInfo)) {
2102            // This element is a catch-all.  Bail out, noting this fact.
2103            SawCatchAll = true;
2104            break;
2105          }
2106          if (AlreadyCaught.count(TypeInfo))
2107            // Already caught by an earlier clause, so having it in the filter
2108            // is pointless.
2109            continue;
2110          // There is no point in having multiple copies of the same typeinfo in
2111          // a filter, so only add it if we didn't already.
2112          if (SeenInFilter.insert(TypeInfo))
2113            NewFilterElts.push_back(cast<Constant>(Elt));
2114        }
2115        // A filter containing a catch-all cannot match anything by definition.
2116        if (SawCatchAll) {
2117          // Throw the filter away.
2118          MakeNewInstruction = true;
2119          continue;
2120        }
2121
2122        // If we dropped something from the filter, make a new one.
2123        if (NewFilterElts.size() < NumTypeInfos)
2124          MakeNewFilter = true;
2125      }
2126      if (MakeNewFilter) {
2127        FilterType = ArrayType::get(FilterType->getElementType(),
2128                                    NewFilterElts.size());
2129        FilterClause = ConstantArray::get(FilterType, NewFilterElts);
2130        MakeNewInstruction = true;
2131      }
2132
2133      NewClauses.push_back(FilterClause);
2134
2135      // If the new filter is empty then it will catch everything so there is
2136      // no point in keeping any following clauses or marking the landingpad
2137      // as having a cleanup.  The case of the original filter being empty was
2138      // already handled above.
2139      if (MakeNewFilter && !NewFilterElts.size()) {
2140        assert(MakeNewInstruction && "New filter but not a new instruction!");
2141        CleanupFlag = false;
2142        break;
2143      }
2144    }
2145  }
2146
2147  // If several filters occur in a row then reorder them so that the shortest
2148  // filters come first (those with the smallest number of elements).  This is
2149  // advantageous because shorter filters are more likely to match, speeding up
2150  // unwinding, but mostly because it increases the effectiveness of the other
2151  // filter optimizations below.
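  // e.g., a clause list {filter [3 x i8*], filter [1 x i8*]} is reordered to
  // {filter [1 x i8*], filter [3 x i8*]} so the shorter filter is tried first.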
2152  for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
2153    unsigned j;
2154    // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
2155    for (j = i; j != e; ++j)
2156      if (!isa<ArrayType>(NewClauses[j]->getType()))
2157        break;
2158
2159    // Check whether the filters are already sorted by length.  We need to know
2160    // if sorting them is actually going to do anything so that we only make a
2161    // new landingpad instruction if it does.
2162    for (unsigned k = i; k + 1 < j; ++k)
2163      if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
2164        // Not sorted, so sort the filters now.  Doing an unstable sort would be
2165        // correct too but reordering filters pointlessly might confuse users.
2166        std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
2167                         shorter_filter);
2168        MakeNewInstruction = true;
2169        break;
2170      }
2171
2172    // Look for the next batch of filters.
2173    i = j + 1;
2174  }
2175
2176  // If typeinfos matched if and only if equal, then the elements of a filter L
2177  // that occurs later than a filter F could be replaced by the intersection of
2178  // the elements of F and L.  In reality two typeinfos can match without being
2179  // equal (for example if one represents a C++ class, and the other some class
2180  // derived from it) so it would be wrong to perform this transform in general.
2181  // However the transform is correct and useful if F is a subset of L.  In that
2182  // case L can be replaced by F, and thus removed altogether since repeating a
2183  // filter is pointless.  So here we look at all pairs of filters F and L where
2184  // L follows F in the list of clauses, and remove L if every element of F is
2185  // an element of L.  This can occur when inlining C++ functions with exception
2186  // specifications.
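  // e.g., if F = filter [1 x i8*] [i8* @T1] is followed later by
  // L = filter [2 x i8*] [i8* @T1, i8* @T2] (with @T1, @T2 arbitrary
  // typeinfos), every element of F is an element of L, so L is removed.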
2187  for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
2188    // Examine each filter in turn.
2189    Value *Filter = NewClauses[i];
2190    ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
2191    if (!FTy)
2192      // Not a filter - skip it.
2193      continue;
2194    unsigned FElts = FTy->getNumElements();
2195    // Examine each filter following this one.  Doing this backwards means that
2196    // we don't have to worry about filters disappearing under us when removed.
2197    for (unsigned j = NewClauses.size() - 1; j != i; --j) {
2198      Value *LFilter = NewClauses[j];
2199      ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
2200      if (!LTy)
2201        // Not a filter - skip it.
2202        continue;
2203      // If Filter is a subset of LFilter, i.e. every element of Filter is also
2204      // an element of LFilter, then discard LFilter.
2205      SmallVectorImpl<Value *>::iterator J = NewClauses.begin() + j;
2206      // If Filter is empty then it is a subset of LFilter.
2207      if (!FElts) {
2208        // Discard LFilter.
2209        NewClauses.erase(J);
2210        MakeNewInstruction = true;
2211        // Move on to the next filter.
2212        continue;
2213      }
2214      unsigned LElts = LTy->getNumElements();
2215      // If Filter is longer than LFilter then it cannot be a subset of it.
2216      if (FElts > LElts)
2217        // Move on to the next filter.
2218        continue;
2219      // At this point we know that LFilter has at least one element.
2220      if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
2221        // Filter is a subset of LFilter iff Filter contains only zeros (as we
2222        // already know that Filter is not longer than LFilter).
2223        if (isa<ConstantAggregateZero>(Filter)) {
2224          assert(FElts <= LElts && "Should have handled this case earlier!");
2225          // Discard LFilter.
2226          NewClauses.erase(J);
2227          MakeNewInstruction = true;
2228        }
2229        // Move on to the next filter.
2230        continue;
2231      }
2232      ConstantArray *LArray = cast<ConstantArray>(LFilter);
2233      if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
2234        // Since Filter is non-empty and contains only zeros, it is a subset of
2235        // LFilter iff LFilter contains a zero.
2236        assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
2237        for (unsigned l = 0; l != LElts; ++l)
2238          if (LArray->getOperand(l)->isNullValue()) {
2239            // LFilter contains a zero - discard it.
2240            NewClauses.erase(J);
2241            MakeNewInstruction = true;
2242            break;
2243          }
2244        // Move on to the next filter.
2245        continue;
2246      }
2247      // At this point we know that both filters are ConstantArrays.  Loop over
2248      // operands to see whether every element of Filter is also an element of
2249      // LFilter.  Since filters tend to be short this is probably faster than
2250      // using a method that scales nicely.
2251      ConstantArray *FArray = cast<ConstantArray>(Filter);
2252      bool AllFound = true;
2253      for (unsigned f = 0; f != FElts; ++f) {
2254        Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
2255        AllFound = false;
2256        for (unsigned l = 0; l != LElts; ++l) {
2257          Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
2258          if (LTypeInfo == FTypeInfo) {
2259            AllFound = true;
2260            break;
2261          }
2262        }
2263        if (!AllFound)
2264          break;
2265      }
2266      if (AllFound) {
2267        // Discard LFilter.
2268        NewClauses.erase(J);
2269        MakeNewInstruction = true;
2270      }
2271      // Move on to the next filter.
2272    }
2273  }
2274
2275  // If we changed any of the clauses, replace the old landingpad instruction
2276  // with a new one.
2277  if (MakeNewInstruction) {
2278    LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
2279                                                 LI.getPersonalityFn(),
2280                                                 NewClauses.size());
2281    for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
2282      NLI->addClause(NewClauses[i]);
2283    // A landing pad with no clauses must have the cleanup flag set.  It is
2284    // theoretically possible, though highly unlikely, that we eliminated all
2285    // clauses.  If so, force the cleanup flag to true.
2286    if (NewClauses.empty())
2287      CleanupFlag = true;
2288    NLI->setCleanup(CleanupFlag);
2289    return NLI;
2290  }
2291
2292  // Even if none of the clauses changed, we may nonetheless have understood
2293  // that the cleanup flag is pointless.  Clear it if so.
2294  if (LI.isCleanup() != CleanupFlag) {
2295    assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
2296    LI.setCleanup(CleanupFlag);
2297    return &LI;
2298  }
2299
2300  return nullptr;
2301}
2302
2303
2304
2305
2306/// TryToSinkInstruction - Try to move the specified instruction from its
2307/// current block into the beginning of DestBlock, which can only happen if it's
2308/// safe to move the instruction past all of the instructions between it and the
2309/// end of its block.
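///
/// e.g., a single-use instruction whose only user is in a unique successor
/// block can be moved next to that user, so it no longer executes on paths
/// that never use its result.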
2310static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
2311  assert(I->hasOneUse() && "Invariants didn't hold!");
2312
  // Cannot move control-flow-involving instructions, instructions that may
  // have side effects (such as volatile loads), vaarg, etc.
2314  if (isa<PHINode>(I) || isa<LandingPadInst>(I) || I->mayHaveSideEffects() ||
2315      isa<TerminatorInst>(I))
2316    return false;
2317
2318  // Do not sink alloca instructions out of the entry block.
2319  if (isa<AllocaInst>(I) && I->getParent() ==
2320        &DestBlock->getParent()->getEntryBlock())
2321    return false;
2322
  // We can only sink load instructions if there is nothing between the load
  // and the end of the block that could change the value.
2325  if (I->mayReadFromMemory()) {
2326    for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
2327         Scan != E; ++Scan)
2328      if (Scan->mayWriteToMemory())
2329        return false;
2330  }
2331
2332  BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
2333  I->moveBefore(InsertPos);
2334  ++NumSunkInst;
2335  return true;
2336}
2337
2338
2339/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
2340/// all reachable code to the worklist.
2341///
2342/// This has a couple of tricks to make the code faster and more powerful.  In
2343/// particular, we constant fold and DCE instructions as we go, to avoid adding
2344/// them to the worklist (this significantly speeds up instcombine on code where
2345/// many instructions are dead or constant).  Additionally, if we find a branch
2346/// whose condition is a known constant, we only visit the reachable successors.
2347///
2348static bool AddReachableCodeToWorklist(BasicBlock *BB,
2349                                       SmallPtrSet<BasicBlock*, 64> &Visited,
2350                                       InstCombiner &IC,
2351                                       const DataLayout *DL,
2352                                       const TargetLibraryInfo *TLI) {
2353  bool MadeIRChange = false;
2354  SmallVector<BasicBlock*, 256> Worklist;
2355  Worklist.push_back(BB);
2356
2357  SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
2358  DenseMap<ConstantExpr*, Constant*> FoldedConstants;
2359
2360  do {
2361    BB = Worklist.pop_back_val();
2362
2363    // We have now visited this block!  If we've already been here, ignore it.
2364    if (!Visited.insert(BB)) continue;
2365
2366    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
2367      Instruction *Inst = BBI++;
2368
2369      // DCE instruction if trivially dead.
2370      if (isInstructionTriviallyDead(Inst, TLI)) {
2371        ++NumDeadInst;
2372        DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
2373        Inst->eraseFromParent();
2374        continue;
2375      }
2376
2377      // ConstantProp instruction if trivially constant.
2378      if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
2379        if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) {
2380          DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: "
2381                       << *Inst << '\n');
2382          Inst->replaceAllUsesWith(C);
2383          ++NumConstProp;
2384          Inst->eraseFromParent();
2385          continue;
2386        }
2387
2388      if (DL) {
2389        // See if we can constant fold its operands.
2390        for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
2391             i != e; ++i) {
2392          ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
2393          if (CE == nullptr) continue;
2394
2395          Constant*& FoldRes = FoldedConstants[CE];
2396          if (!FoldRes)
2397            FoldRes = ConstantFoldConstantExpression(CE, DL, TLI);
2398          if (!FoldRes)
2399            FoldRes = CE;
2400
2401          if (FoldRes != CE) {
2402            *i = FoldRes;
2403            MadeIRChange = true;
2404          }
2405        }
2406      }
2407
2408      InstrsForInstCombineWorklist.push_back(Inst);
2409    }
2410
2411    // Recursively visit successors.  If this is a branch or switch on a
2412    // constant, only visit the reachable successor.
2413    TerminatorInst *TI = BB->getTerminator();
2414    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
2415      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
2416        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
2417        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
2418        Worklist.push_back(ReachableBB);
2419        continue;
2420      }
2421    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        // See if this is an explicit destination.  Note that a 'continue'
        // inside the loop below would only advance the case iterator, so use
        // a flag to remember whether a matching case was found.
        bool FoundCase = false;
        for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
             i != e; ++i)
          if (i.getCaseValue() == Cond) {
            Worklist.push_back(i.getCaseSuccessor());
            FoundCase = true;
            break;
          }
        if (FoundCase)
          continue;

        // Otherwise it is the default destination.
        Worklist.push_back(SI->getDefaultDest());
        continue;
      }
2436    }
2437
2438    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
2439      Worklist.push_back(TI->getSuccessor(i));
2440  } while (!Worklist.empty());
2441
2442  // Once we've found all of the instructions to add to instcombine's worklist,
2443  // add them in reverse order.  This way instcombine will visit from the top
  // of the function down.  This jibes well with the way that it adds all uses
2445  // of instructions to the worklist after doing a transformation, thus avoiding
2446  // some N^2 behavior in pathological cases.
2447  IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
2448                              InstrsForInstCombineWorklist.size());
2449
2450  return MadeIRChange;
2451}
2452
2453bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
2454  MadeIRChange = false;
2455
2456  DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
2457               << F.getName() << "\n");
2458
2459  {
2460    // Do a depth-first traversal of the function, populate the worklist with
2461    // the reachable instructions.  Ignore blocks that are not reachable.  Keep
2462    // track of which blocks we visit.
2463    SmallPtrSet<BasicBlock*, 64> Visited;
2464    MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, DL,
2465                                               TLI);
2466
2467    // Do a quick scan over the function.  If we find any blocks that are
2468    // unreachable, remove any instructions inside of them.  This prevents
2469    // the instcombine code from having to deal with some bad special cases.
2470    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
2471      if (Visited.count(BB)) continue;
2472
      // Delete the instructions backwards, as doing so reduces the number of
      // def-use and use-def chains that have to be updated.
2475      Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2476      while (EndInst != BB->begin()) {
2477        // Delete the next to last instruction.
2478        BasicBlock::iterator I = EndInst;
2479        Instruction *Inst = --I;
2480        if (!Inst->use_empty())
2481          Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
2482        if (isa<LandingPadInst>(Inst)) {
2483          EndInst = Inst;
2484          continue;
2485        }
2486        if (!isa<DbgInfoIntrinsic>(Inst)) {
2487          ++NumDeadInst;
2488          MadeIRChange = true;
2489        }
2490        Inst->eraseFromParent();
2491      }
2492    }
2493  }
2494
2495  while (!Worklist.isEmpty()) {
2496    Instruction *I = Worklist.RemoveOne();
2497    if (I == nullptr) continue;  // skip null values.
2498
2499    // Check to see if we can DCE the instruction.
2500    if (isInstructionTriviallyDead(I, TLI)) {
2501      DEBUG(dbgs() << "IC: DCE: " << *I << '\n');
2502      EraseInstFromFunction(*I);
2503      ++NumDeadInst;
2504      MadeIRChange = true;
2505      continue;
2506    }
2507
2508    // Instruction isn't dead, see if we can constant propagate it.
2509    if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
2510      if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
2511        DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
2512
2513        // Add operands to the worklist.
2514        ReplaceInstUsesWith(*I, C);
2515        ++NumConstProp;
2516        EraseInstFromFunction(*I);
2517        MadeIRChange = true;
2518        continue;
2519      }
2520
2521    // See if we can trivially sink this instruction to a successor basic block.
2522    if (I->hasOneUse()) {
2523      BasicBlock *BB = I->getParent();
2524      Instruction *UserInst = cast<Instruction>(*I->user_begin());
2525      BasicBlock *UserParent;
2526
2527      // Get the block the use occurs in.
2528      if (PHINode *PN = dyn_cast<PHINode>(UserInst))
2529        UserParent = PN->getIncomingBlock(*I->use_begin());
2530      else
2531        UserParent = UserInst->getParent();
2532
2533      if (UserParent != BB) {
2534        bool UserIsSuccessor = false;
2535        // See if the user is one of our successors.
2536        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
2537          if (*SI == UserParent) {
2538            UserIsSuccessor = true;
2539            break;
2540          }
2541
        // If the user is one of our immediate successors, and if that
        // successor only has us as a predecessor (we'd have to split the
        // critical edge otherwise), we can keep going.
2545        if (UserIsSuccessor && UserParent->getSinglePredecessor())
2546          // Okay, the CFG is simple enough, try to sink this instruction.
2547          MadeIRChange |= TryToSinkInstruction(I, UserParent);
2548      }
2549    }
2550
2551    // Now that we have an instruction, try combining it to simplify it.
2552    Builder->SetInsertPoint(I->getParent(), I);
2553    Builder->SetCurrentDebugLocation(I->getDebugLoc());
2554
2555#ifndef NDEBUG
2556    std::string OrigI;
2557#endif
2558    DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
2559    DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
2560
2561    if (Instruction *Result = visit(*I)) {
2562      ++NumCombined;
2563      // Should we replace the old instruction with a new one?
2564      if (Result != I) {
2565        DEBUG(dbgs() << "IC: Old = " << *I << '\n'
2566                     << "    New = " << *Result << '\n');
2567
2568        if (!I->getDebugLoc().isUnknown())
2569          Result->setDebugLoc(I->getDebugLoc());
2570        // Everything uses the new instruction now.
2571        I->replaceAllUsesWith(Result);
2572
2573        // Move the name to the new instruction first.
2574        Result->takeName(I);
2575
2576        // Push the new instruction and any users onto the worklist.
2577        Worklist.Add(Result);
2578        Worklist.AddUsersToWorkList(*Result);
2579
2580        // Insert the new instruction into the basic block...
2581        BasicBlock *InstParent = I->getParent();
2582        BasicBlock::iterator InsertPos = I;
2583
2584        // If we replace a PHI with something that isn't a PHI, fix up the
2585        // insertion point.
2586        if (!isa<PHINode>(Result) && isa<PHINode>(InsertPos))
2587          InsertPos = InstParent->getFirstInsertionPt();
2588
2589        InstParent->getInstList().insert(InsertPos, Result);
2590
2591        EraseInstFromFunction(*I);
2592      } else {
2593#ifndef NDEBUG
2594        DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
2595                     << "    New = " << *I << '\n');
2596#endif
2597
2598        // If the instruction was modified, it's possible that it is now dead.
2599        // if so, remove it.
2600        if (isInstructionTriviallyDead(I, TLI)) {
2601          EraseInstFromFunction(*I);
2602        } else {
2603          Worklist.Add(I);
2604          Worklist.AddUsersToWorkList(*I);
2605        }
2606      }
2607      MadeIRChange = true;
2608    }
2609  }
2610
2611  Worklist.Zap();
2612  return MadeIRChange;
2613}
2614
2615namespace {
2616class InstCombinerLibCallSimplifier : public LibCallSimplifier {
2617  InstCombiner *IC;
2618public:
2619  InstCombinerLibCallSimplifier(const DataLayout *DL,
2620                                const TargetLibraryInfo *TLI,
2621                                InstCombiner *IC)
2622    : LibCallSimplifier(DL, TLI, UnsafeFPShrink) {
2623    this->IC = IC;
2624  }
2625
2626  /// replaceAllUsesWith - override so that instruction replacement
2627  /// can be defined in terms of the instruction combiner framework.
2628  void replaceAllUsesWith(Instruction *I, Value *With) const override {
2629    IC->ReplaceInstUsesWith(*I, With);
2630  }
2631};
2632}
2633
2634bool InstCombiner::runOnFunction(Function &F) {
2635  if (skipOptnoneFunction(F))
2636    return false;
2637
2638  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
2639  DL = DLP ? &DLP->getDataLayout() : nullptr;
2640  TLI = &getAnalysis<TargetLibraryInfo>();
2641  // Minimizing size?
2642  MinimizeSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
2643                                                Attribute::MinSize);
2644
2645  /// Builder - This is an IRBuilder that automatically inserts new
2646  /// instructions into the worklist when they are created.
2647  IRBuilder<true, TargetFolder, InstCombineIRInserter>
2648    TheBuilder(F.getContext(), TargetFolder(DL),
2649               InstCombineIRInserter(Worklist));
2650  Builder = &TheBuilder;
2651
2652  InstCombinerLibCallSimplifier TheSimplifier(DL, TLI, this);
2653  Simplifier = &TheSimplifier;
2654
2655  bool EverMadeChange = false;
2656
  // Lower dbg.declare intrinsics, otherwise their value may be clobbered
  // by the instcombiner.
2659  EverMadeChange = LowerDbgDeclare(F);
2660
2661  // Iterate while there is work to do.
2662  unsigned Iteration = 0;
2663  while (DoOneIteration(F, Iteration++))
2664    EverMadeChange = true;
2665
2666  Builder = nullptr;
2667  return EverMadeChange;
2668}
2669
2670FunctionPass *llvm::createInstructionCombiningPass() {
2671  return new InstCombiner();
2672}
2673