1//===- ValueTracking.cpp - Walk computations to compute properties --------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains routines that help analyze properties that chains of
11// computations have.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Analysis/ValueTracking.h"
16#include "llvm/ADT/Optional.h"
17#include "llvm/ADT/SmallPtrSet.h"
18#include "llvm/Analysis/AssumptionCache.h"
19#include "llvm/Analysis/InstructionSimplify.h"
20#include "llvm/Analysis/MemoryBuiltins.h"
21#include "llvm/Analysis/Loads.h"
22#include "llvm/Analysis/LoopInfo.h"
23#include "llvm/Analysis/VectorUtils.h"
24#include "llvm/IR/CallSite.h"
25#include "llvm/IR/ConstantRange.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/Dominators.h"
29#include "llvm/IR/GetElementPtrTypeIterator.h"
30#include "llvm/IR/GlobalAlias.h"
31#include "llvm/IR/GlobalVariable.h"
32#include "llvm/IR/Instructions.h"
33#include "llvm/IR/IntrinsicInst.h"
34#include "llvm/IR/LLVMContext.h"
35#include "llvm/IR/Metadata.h"
36#include "llvm/IR/Operator.h"
37#include "llvm/IR/PatternMatch.h"
38#include "llvm/IR/Statepoint.h"
39#include "llvm/Support/Debug.h"
40#include "llvm/Support/MathExtras.h"
41#include <algorithm>
42#include <array>
43#include <cstring>
44using namespace llvm;
45using namespace llvm::PatternMatch;
46
47const unsigned MaxDepth = 6;
48
49// Controls the number of uses of the value searched for possible
50// dominating comparisons.
51static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
52                                              cl::Hidden, cl::init(20));
53
54/// Returns the bitwidth of the given scalar or pointer type (if unknown returns
55/// 0). For vector types, returns the element type's bitwidth.
56static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
57  if (unsigned BitWidth = Ty->getScalarSizeInBits())
58    return BitWidth;
59
60  return DL.getPointerTypeSizeInBits(Ty);
61}
62
63namespace {
64// Simplifying using an assume can only be done in a particular control-flow
65// context (the context instruction provides that context). If an assume and
66// the context instruction are not in the same block then the DT helps in
67// figuring out if we can use it.
68struct Query {
69  const DataLayout &DL;
70  AssumptionCache *AC;
71  const Instruction *CxtI;
72  const DominatorTree *DT;
73
74  /// Set of assumptions that should be excluded from further queries.
75  /// This is because of the potential for mutual recursion to cause
76  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
77  /// classic case of this is assume(x = y), which will attempt to determine
78  /// bits in x from bits in y, which will attempt to determine bits in y from
79  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
80  /// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
81  /// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so
82  /// on.
83  std::array<const Value*, MaxDepth> Excluded;
84  unsigned NumExcluded;
85
86  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
87        const DominatorTree *DT)
88      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), NumExcluded(0) {}
89
90  Query(const Query &Q, const Value *NewExcl)
91      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), NumExcluded(Q.NumExcluded) {
92    Excluded = Q.Excluded;
93    Excluded[NumExcluded++] = NewExcl;
94    assert(NumExcluded <= Excluded.size());
95  }
96
97  bool isExcluded(const Value *Value) const {
98    if (NumExcluded == 0)
99      return false;
100    auto End = Excluded.begin() + NumExcluded;
101    return std::find(Excluded.begin(), End, Value) != End;
102  }
103};
104} // end anonymous namespace
105
106// Given the provided Value and, potentially, a context instruction, return
107// the preferred context instruction (if any).
108static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
109  // If we've been provided with a context instruction, then use that (provided
110  // it has been inserted).
111  if (CxtI && CxtI->getParent())
112    return CxtI;
113
114  // If the value is really an already-inserted instruction, then use that.
115  CxtI = dyn_cast<Instruction>(V);
116  if (CxtI && CxtI->getParent())
117    return CxtI;
118
119  return nullptr;
120}
121
122static void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
123                             unsigned Depth, const Query &Q);
124
125void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
126                            const DataLayout &DL, unsigned Depth,
127                            AssumptionCache *AC, const Instruction *CxtI,
128                            const DominatorTree *DT) {
129  ::computeKnownBits(V, KnownZero, KnownOne, Depth,
130                     Query(DL, AC, safeCxtI(V, CxtI), DT));
131}
132
133bool llvm::haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
134                               AssumptionCache *AC, const Instruction *CxtI,
135                               const DominatorTree *DT) {
136  assert(LHS->getType() == RHS->getType() &&
137         "LHS and RHS should have the same type");
138  assert(LHS->getType()->isIntOrIntVectorTy() &&
139         "LHS and RHS should be integers");
140  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
141  APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
142  APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
143  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
144  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
145  return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
146}
147
148static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
149                           unsigned Depth, const Query &Q);
150
151void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
152                          const DataLayout &DL, unsigned Depth,
153                          AssumptionCache *AC, const Instruction *CxtI,
154                          const DominatorTree *DT) {
155  ::ComputeSignBit(V, KnownZero, KnownOne, Depth,
156                   Query(DL, AC, safeCxtI(V, CxtI), DT));
157}
158
159static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
160                                   const Query &Q);
161
162bool llvm::isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL, bool OrZero,
163                                  unsigned Depth, AssumptionCache *AC,
164                                  const Instruction *CxtI,
165                                  const DominatorTree *DT) {
166  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
167                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
168}
169
170static bool isKnownNonZero(Value *V, unsigned Depth, const Query &Q);
171
172bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
173                          AssumptionCache *AC, const Instruction *CxtI,
174                          const DominatorTree *DT) {
175  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
176}
177
178bool llvm::isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth,
179                              AssumptionCache *AC, const Instruction *CxtI,
180                              const DominatorTree *DT) {
181  bool NonNegative, Negative;
182  ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
183  return NonNegative;
184}
185
186bool llvm::isKnownPositive(Value *V, const DataLayout &DL, unsigned Depth,
187                           AssumptionCache *AC, const Instruction *CxtI,
188                           const DominatorTree *DT) {
189  if (auto *CI = dyn_cast<ConstantInt>(V))
190    return CI->getValue().isStrictlyPositive();
191
192  // TODO: We'd doing two recursive queries here.  We should factor this such
193  // that only a single query is needed.
194  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
195    isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
196}
197
198bool llvm::isKnownNegative(Value *V, const DataLayout &DL, unsigned Depth,
199                           AssumptionCache *AC, const Instruction *CxtI,
200                           const DominatorTree *DT) {
201  bool NonNegative, Negative;
202  ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
203  return Negative;
204}
205
206static bool isKnownNonEqual(Value *V1, Value *V2, const Query &Q);
207
208bool llvm::isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
209                          AssumptionCache *AC, const Instruction *CxtI,
210                          const DominatorTree *DT) {
211  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
212                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
213                                         DT));
214}
215
216static bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth,
217                              const Query &Q);
218
219bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
220                             unsigned Depth, AssumptionCache *AC,
221                             const Instruction *CxtI, const DominatorTree *DT) {
222  return ::MaskedValueIsZero(V, Mask, Depth,
223                             Query(DL, AC, safeCxtI(V, CxtI), DT));
224}
225
226static unsigned ComputeNumSignBits(Value *V, unsigned Depth, const Query &Q);
227
228unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout &DL,
229                                  unsigned Depth, AssumptionCache *AC,
230                                  const Instruction *CxtI,
231                                  const DominatorTree *DT) {
232  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
233}
234
235static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
236                                   APInt &KnownZero, APInt &KnownOne,
237                                   APInt &KnownZero2, APInt &KnownOne2,
238                                   unsigned Depth, const Query &Q) {
239  if (!Add) {
240    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
241      // We know that the top bits of C-X are clear if X contains less bits
242      // than C (i.e. no wrap-around can happen).  For example, 20-X is
243      // positive if we can prove that X is >= 0 and < 16.
244      if (!CLHS->getValue().isNegative()) {
245        unsigned BitWidth = KnownZero.getBitWidth();
246        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
247        // NLZ can't be BitWidth with no sign bit
248        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
249        computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);
250
251        // If all of the MaskV bits are known to be zero, then we know the
252        // output top bits are zero, because we now know that the output is
253        // from [0-C].
254        if ((KnownZero2 & MaskV) == MaskV) {
255          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
256          // Top bits known zero.
257          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
258        }
259      }
260    }
261  }
262
263  unsigned BitWidth = KnownZero.getBitWidth();
264
265  // If an initial sequence of bits in the result is not needed, the
266  // corresponding bits in the operands are not needed.
267  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
268  computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, Depth + 1, Q);
269  computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);
270
271  // Carry in a 1 for a subtract, rather than a 0.
272  APInt CarryIn(BitWidth, 0);
273  if (!Add) {
274    // Sum = LHS + ~RHS + 1
275    std::swap(KnownZero2, KnownOne2);
276    CarryIn.setBit(0);
277  }
278
279  APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
280  APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;
281
282  // Compute known bits of the carry.
283  APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
284  APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;
285
286  // Compute set of known bits (where all three relevant bits are known).
287  APInt LHSKnown = LHSKnownZero | LHSKnownOne;
288  APInt RHSKnown = KnownZero2 | KnownOne2;
289  APInt CarryKnown = CarryKnownZero | CarryKnownOne;
290  APInt Known = LHSKnown & RHSKnown & CarryKnown;
291
292  assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
293         "known bits of sum differ");
294
295  // Compute known bits of the result.
296  KnownZero = ~PossibleSumOne & Known;
297  KnownOne = PossibleSumOne & Known;
298
299  // Are we still trying to solve for the sign bit?
300  if (!Known.isNegative()) {
301    if (NSW) {
302      // Adding two non-negative numbers, or subtracting a negative number from
303      // a non-negative one, can't wrap into negative.
304      if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
305        KnownZero |= APInt::getSignBit(BitWidth);
306      // Adding two negative numbers, or subtracting a non-negative number from
307      // a negative one, can't wrap into non-negative.
308      else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
309        KnownOne |= APInt::getSignBit(BitWidth);
310    }
311  }
312}
313
314static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
315                                APInt &KnownZero, APInt &KnownOne,
316                                APInt &KnownZero2, APInt &KnownOne2,
317                                unsigned Depth, const Query &Q) {
318  unsigned BitWidth = KnownZero.getBitWidth();
319  computeKnownBits(Op1, KnownZero, KnownOne, Depth + 1, Q);
320  computeKnownBits(Op0, KnownZero2, KnownOne2, Depth + 1, Q);
321
322  bool isKnownNegative = false;
323  bool isKnownNonNegative = false;
324  // If the multiplication is known not to overflow, compute the sign bit.
325  if (NSW) {
326    if (Op0 == Op1) {
327      // The product of a number with itself is non-negative.
328      isKnownNonNegative = true;
329    } else {
330      bool isKnownNonNegativeOp1 = KnownZero.isNegative();
331      bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
332      bool isKnownNegativeOp1 = KnownOne.isNegative();
333      bool isKnownNegativeOp0 = KnownOne2.isNegative();
334      // The product of two numbers with the same sign is non-negative.
335      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
336        (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
337      // The product of a negative number and a non-negative number is either
338      // negative or zero.
339      if (!isKnownNonNegative)
340        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
341                           isKnownNonZero(Op0, Depth, Q)) ||
342                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
343                           isKnownNonZero(Op1, Depth, Q));
344    }
345  }
346
347  // If low bits are zero in either operand, output low known-0 bits.
348  // Also compute a conservative estimate for high known-0 bits.
349  // More trickiness is possible, but this is sufficient for the
350  // interesting case of alignment computation.
351  KnownOne.clearAllBits();
352  unsigned TrailZ = KnownZero.countTrailingOnes() +
353                    KnownZero2.countTrailingOnes();
354  unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
355                             KnownZero2.countLeadingOnes(),
356                             BitWidth) - BitWidth;
357
358  TrailZ = std::min(TrailZ, BitWidth);
359  LeadZ = std::min(LeadZ, BitWidth);
360  KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
361              APInt::getHighBitsSet(BitWidth, LeadZ);
362
363  // Only make use of no-wrap flags if we failed to compute the sign bit
364  // directly.  This matters if the multiplication always overflows, in
365  // which case we prefer to follow the result of the direct computation,
366  // though as the program is invoking undefined behaviour we can choose
367  // whatever we like here.
368  if (isKnownNonNegative && !KnownOne.isNegative())
369    KnownZero.setBit(BitWidth - 1);
370  else if (isKnownNegative && !KnownZero.isNegative())
371    KnownOne.setBit(BitWidth - 1);
372}
373
374void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
375                                             APInt &KnownZero,
376                                             APInt &KnownOne) {
377  unsigned BitWidth = KnownZero.getBitWidth();
378  unsigned NumRanges = Ranges.getNumOperands() / 2;
379  assert(NumRanges >= 1);
380
381  KnownZero.setAllBits();
382  KnownOne.setAllBits();
383
384  for (unsigned i = 0; i < NumRanges; ++i) {
385    ConstantInt *Lower =
386        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
387    ConstantInt *Upper =
388        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
389    ConstantRange Range(Lower->getValue(), Upper->getValue());
390
391    // The first CommonPrefixBits of all values in Range are equal.
392    unsigned CommonPrefixBits =
393        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
394
395    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
396    KnownOne &= Range.getUnsignedMax() & Mask;
397    KnownZero &= ~Range.getUnsignedMax() & Mask;
398  }
399}
400
401static bool isEphemeralValueOf(Instruction *I, const Value *E) {
402  SmallVector<const Value *, 16> WorkSet(1, I);
403  SmallPtrSet<const Value *, 32> Visited;
404  SmallPtrSet<const Value *, 16> EphValues;
405
406  // The instruction defining an assumption's condition itself is always
407  // considered ephemeral to that assumption (even if it has other
408  // non-ephemeral users). See r246696's test case for an example.
409  if (std::find(I->op_begin(), I->op_end(), E) != I->op_end())
410    return true;
411
412  while (!WorkSet.empty()) {
413    const Value *V = WorkSet.pop_back_val();
414    if (!Visited.insert(V).second)
415      continue;
416
417    // If all uses of this value are ephemeral, then so is this value.
418    if (std::all_of(V->user_begin(), V->user_end(),
419                    [&](const User *U) { return EphValues.count(U); })) {
420      if (V == E)
421        return true;
422
423      EphValues.insert(V);
424      if (const User *U = dyn_cast<User>(V))
425        for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
426             J != JE; ++J) {
427          if (isSafeToSpeculativelyExecute(*J))
428            WorkSet.push_back(*J);
429        }
430    }
431  }
432
433  return false;
434}
435
436// Is this an intrinsic that cannot be speculated but also cannot trap?
437static bool isAssumeLikeIntrinsic(const Instruction *I) {
438  if (const CallInst *CI = dyn_cast<CallInst>(I))
439    if (Function *F = CI->getCalledFunction())
440      switch (F->getIntrinsicID()) {
441      default: break;
442      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
443      case Intrinsic::assume:
444      case Intrinsic::dbg_declare:
445      case Intrinsic::dbg_value:
446      case Intrinsic::invariant_start:
447      case Intrinsic::invariant_end:
448      case Intrinsic::lifetime_start:
449      case Intrinsic::lifetime_end:
450      case Intrinsic::objectsize:
451      case Intrinsic::ptr_annotation:
452      case Intrinsic::var_annotation:
453        return true;
454      }
455
456  return false;
457}
458
459static bool isValidAssumeForContext(Value *V, const Instruction *CxtI,
460                                    const DominatorTree *DT) {
461  Instruction *Inv = cast<Instruction>(V);
462
463  // There are two restrictions on the use of an assume:
464  //  1. The assume must dominate the context (or the control flow must
465  //     reach the assume whenever it reaches the context).
466  //  2. The context must not be in the assume's set of ephemeral values
467  //     (otherwise we will use the assume to prove that the condition
468  //     feeding the assume is trivially true, thus causing the removal of
469  //     the assume).
470
471  if (DT) {
472    if (DT->dominates(Inv, CxtI)) {
473      return true;
474    } else if (Inv->getParent() == CxtI->getParent()) {
475      // The context comes first, but they're both in the same block. Make sure
476      // there is nothing in between that might interrupt the control flow.
477      for (BasicBlock::const_iterator I =
478             std::next(BasicBlock::const_iterator(CxtI)),
479                                      IE(Inv); I != IE; ++I)
480        if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
481          return false;
482
483      return !isEphemeralValueOf(Inv, CxtI);
484    }
485
486    return false;
487  }
488
489  // When we don't have a DT, we do a limited search...
490  if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
491    return true;
492  } else if (Inv->getParent() == CxtI->getParent()) {
493    // Search forward from the assume until we reach the context (or the end
494    // of the block); the common case is that the assume will come first.
495    for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
496         IE = Inv->getParent()->end(); I != IE; ++I)
497      if (&*I == CxtI)
498        return true;
499
500    // The context must come first...
501    for (BasicBlock::const_iterator I =
502           std::next(BasicBlock::const_iterator(CxtI)),
503                                    IE(Inv); I != IE; ++I)
504      if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
505        return false;
506
507    return !isEphemeralValueOf(Inv, CxtI);
508  }
509
510  return false;
511}
512
513bool llvm::isValidAssumeForContext(const Instruction *I,
514                                   const Instruction *CxtI,
515                                   const DominatorTree *DT) {
516  return ::isValidAssumeForContext(const_cast<Instruction *>(I), CxtI, DT);
517}
518
519static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
520                                       APInt &KnownOne, unsigned Depth,
521                                       const Query &Q) {
522  // Use of assumptions is context-sensitive. If we don't have a context, we
523  // cannot use them!
524  if (!Q.AC || !Q.CxtI)
525    return;
526
527  unsigned BitWidth = KnownZero.getBitWidth();
528
529  for (auto &AssumeVH : Q.AC->assumptions()) {
530    if (!AssumeVH)
531      continue;
532    CallInst *I = cast<CallInst>(AssumeVH);
533    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
534           "Got assumption for the wrong function!");
535    if (Q.isExcluded(I))
536      continue;
537
538    // Warning: This loop can end up being somewhat performance sensetive.
539    // We're running this loop for once for each value queried resulting in a
540    // runtime of ~O(#assumes * #values).
541
542    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
543           "must be an assume intrinsic");
544
545    Value *Arg = I->getArgOperand(0);
546
547    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
548      assert(BitWidth == 1 && "assume operand is not i1?");
549      KnownZero.clearAllBits();
550      KnownOne.setAllBits();
551      return;
552    }
553
554    // The remaining tests are all recursive, so bail out if we hit the limit.
555    if (Depth == MaxDepth)
556      continue;
557
558    Value *A, *B;
559    auto m_V = m_CombineOr(m_Specific(V),
560                           m_CombineOr(m_PtrToInt(m_Specific(V)),
561                           m_BitCast(m_Specific(V))));
562
563    CmpInst::Predicate Pred;
564    ConstantInt *C;
565    // assume(v = a)
566    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
567        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
568      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
569      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
570      KnownZero |= RHSKnownZero;
571      KnownOne  |= RHSKnownOne;
572    // assume(v & b = a)
573    } else if (match(Arg,
574                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
575               Pred == ICmpInst::ICMP_EQ &&
576               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
577      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
578      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
579      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
580      computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
581
582      // For those bits in the mask that are known to be one, we can propagate
583      // known bits from the RHS to V.
584      KnownZero |= RHSKnownZero & MaskKnownOne;
585      KnownOne  |= RHSKnownOne  & MaskKnownOne;
586    // assume(~(v & b) = a)
587    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
588                                   m_Value(A))) &&
589               Pred == ICmpInst::ICMP_EQ &&
590               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
591      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
592      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
593      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
594      computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
595
596      // For those bits in the mask that are known to be one, we can propagate
597      // inverted known bits from the RHS to V.
598      KnownZero |= RHSKnownOne  & MaskKnownOne;
599      KnownOne  |= RHSKnownZero & MaskKnownOne;
600    // assume(v | b = a)
601    } else if (match(Arg,
602                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
603               Pred == ICmpInst::ICMP_EQ &&
604               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
605      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
606      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
607      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
608      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
609
610      // For those bits in B that are known to be zero, we can propagate known
611      // bits from the RHS to V.
612      KnownZero |= RHSKnownZero & BKnownZero;
613      KnownOne  |= RHSKnownOne  & BKnownZero;
614    // assume(~(v | b) = a)
615    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
616                                   m_Value(A))) &&
617               Pred == ICmpInst::ICMP_EQ &&
618               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
619      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
620      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
621      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
622      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
623
624      // For those bits in B that are known to be zero, we can propagate
625      // inverted known bits from the RHS to V.
626      KnownZero |= RHSKnownOne  & BKnownZero;
627      KnownOne  |= RHSKnownZero & BKnownZero;
628    // assume(v ^ b = a)
629    } else if (match(Arg,
630                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
631               Pred == ICmpInst::ICMP_EQ &&
632               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
633      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
634      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
635      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
636      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
637
638      // For those bits in B that are known to be zero, we can propagate known
639      // bits from the RHS to V. For those bits in B that are known to be one,
640      // we can propagate inverted known bits from the RHS to V.
641      KnownZero |= RHSKnownZero & BKnownZero;
642      KnownOne  |= RHSKnownOne  & BKnownZero;
643      KnownZero |= RHSKnownOne  & BKnownOne;
644      KnownOne  |= RHSKnownZero & BKnownOne;
645    // assume(~(v ^ b) = a)
646    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
647                                   m_Value(A))) &&
648               Pred == ICmpInst::ICMP_EQ &&
649               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
650      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
651      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
652      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
653      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
654
655      // For those bits in B that are known to be zero, we can propagate
656      // inverted known bits from the RHS to V. For those bits in B that are
657      // known to be one, we can propagate known bits from the RHS to V.
658      KnownZero |= RHSKnownOne  & BKnownZero;
659      KnownOne  |= RHSKnownZero & BKnownZero;
660      KnownZero |= RHSKnownZero & BKnownOne;
661      KnownOne  |= RHSKnownOne  & BKnownOne;
662    // assume(v << c = a)
663    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
664                                   m_Value(A))) &&
665               Pred == ICmpInst::ICMP_EQ &&
666               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
667      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
668      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
669      // For those bits in RHS that are known, we can propagate them to known
670      // bits in V shifted to the right by C.
671      KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
672      KnownOne  |= RHSKnownOne.lshr(C->getZExtValue());
673    // assume(~(v << c) = a)
674    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
675                                   m_Value(A))) &&
676               Pred == ICmpInst::ICMP_EQ &&
677               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
678      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
679      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
680      // For those bits in RHS that are known, we can propagate them inverted
681      // to known bits in V shifted to the right by C.
682      KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
683      KnownOne  |= RHSKnownZero.lshr(C->getZExtValue());
684    // assume(v >> c = a)
685    } else if (match(Arg,
686                     m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
687                                                m_AShr(m_V, m_ConstantInt(C))),
688                              m_Value(A))) &&
689               Pred == ICmpInst::ICMP_EQ &&
690               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
691      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
692      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
693      // For those bits in RHS that are known, we can propagate them to known
694      // bits in V shifted to the right by C.
695      KnownZero |= RHSKnownZero << C->getZExtValue();
696      KnownOne  |= RHSKnownOne  << C->getZExtValue();
697    // assume(~(v >> c) = a)
698    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
699                                             m_LShr(m_V, m_ConstantInt(C)),
700                                             m_AShr(m_V, m_ConstantInt(C)))),
701                                   m_Value(A))) &&
702               Pred == ICmpInst::ICMP_EQ &&
703               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
704      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
705      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
706      // For those bits in RHS that are known, we can propagate them inverted
707      // to known bits in V shifted to the right by C.
708      KnownZero |= RHSKnownOne  << C->getZExtValue();
709      KnownOne  |= RHSKnownZero << C->getZExtValue();
710    // assume(v >=_s c) where c is non-negative
711    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
712               Pred == ICmpInst::ICMP_SGE &&
713               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
714      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
715      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
716
717      if (RHSKnownZero.isNegative()) {
718        // We know that the sign bit is zero.
719        KnownZero |= APInt::getSignBit(BitWidth);
720      }
721    // assume(v >_s c) where c is at least -1.
722    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
723               Pred == ICmpInst::ICMP_SGT &&
724               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
725      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
726      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
727
728      if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
729        // We know that the sign bit is zero.
730        KnownZero |= APInt::getSignBit(BitWidth);
731      }
732    // assume(v <=_s c) where c is negative
733    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
734               Pred == ICmpInst::ICMP_SLE &&
735               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
736      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
737      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
738
739      if (RHSKnownOne.isNegative()) {
740        // We know that the sign bit is one.
741        KnownOne |= APInt::getSignBit(BitWidth);
742      }
743    // assume(v <_s c) where c is non-positive
744    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
745               Pred == ICmpInst::ICMP_SLT &&
746               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
747      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
748      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
749
750      if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
751        // We know that the sign bit is one.
752        KnownOne |= APInt::getSignBit(BitWidth);
753      }
754    // assume(v <=_u c)
755    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
756               Pred == ICmpInst::ICMP_ULE &&
757               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
758      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
759      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
760
761      // Whatever high bits in c are zero are known to be zero.
762      KnownZero |=
763        APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
764    // assume(v <_u c)
765    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
766               Pred == ICmpInst::ICMP_ULT &&
767               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
768      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
769      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
770
771      // Whatever high bits in c are zero are known to be zero (if c is a power
772      // of 2, then one more).
773      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
774        KnownZero |=
775          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1);
776      else
777        KnownZero |=
778          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
779    }
780  }
781}
782
783// Compute known bits from a shift operator, including those with a
784// non-constant shift amount. KnownZero and KnownOne are the outputs of this
785// function. KnownZero2 and KnownOne2 are pre-allocated temporaries with the
786// same bit width as KnownZero and KnownOne. KZF and KOF are operator-specific
787// functors that, given the known-zero or known-one bits respectively, and a
788// shift amount, compute the implied known-zero or known-one bits of the shift
789// operator's result respectively for that shift amount. The results from calling
790// KZF and KOF are conservatively combined for all permitted shift amounts.
791template <typename KZFunctor, typename KOFunctor>
792static void computeKnownBitsFromShiftOperator(Operator *I,
793              APInt &KnownZero, APInt &KnownOne,
794              APInt &KnownZero2, APInt &KnownOne2,
795              unsigned Depth, const Query &Q, KZFunctor KZF, KOFunctor KOF) {
796  unsigned BitWidth = KnownZero.getBitWidth();
797
798  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
799    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
800
801    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
802    KnownZero = KZF(KnownZero, ShiftAmt);
803    KnownOne  = KOF(KnownOne, ShiftAmt);
804    return;
805  }
806
807  computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
808
809  // Note: We cannot use KnownZero.getLimitedValue() here, because if
810  // BitWidth > 64 and any upper bits are known, we'll end up returning the
811  // limit value (which implies all bits are known).
812  uint64_t ShiftAmtKZ = KnownZero.zextOrTrunc(64).getZExtValue();
813  uint64_t ShiftAmtKO = KnownOne.zextOrTrunc(64).getZExtValue();
814
815  // It would be more-clearly correct to use the two temporaries for this
816  // calculation. Reusing the APInts here to prevent unnecessary allocations.
817  KnownZero.clearAllBits();
818  KnownOne.clearAllBits();
819
820  // If we know the shifter operand is nonzero, we can sometimes infer more
821  // known bits. However this is expensive to compute, so be lazy about it and
822  // only compute it when absolutely necessary.
823  Optional<bool> ShifterOperandIsNonZero;
824
825  // Early exit if we can't constrain any well-defined shift amount.
826  if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
827    ShifterOperandIsNonZero =
828        isKnownNonZero(I->getOperand(1), Depth + 1, Q);
829    if (!*ShifterOperandIsNonZero)
830      return;
831  }
832
833  computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
834
835  KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
836  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
837    // Combine the shifted known input bits only for those shift amounts
838    // compatible with its known constraints.
839    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
840      continue;
841    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
842      continue;
843    // If we know the shifter is nonzero, we may be able to infer more known
844    // bits. This check is sunk down as far as possible to avoid the expensive
845    // call to isKnownNonZero if the cheaper checks above fail.
846    if (ShiftAmt == 0) {
847      if (!ShifterOperandIsNonZero.hasValue())
848        ShifterOperandIsNonZero =
849            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
850      if (*ShifterOperandIsNonZero)
851        continue;
852    }
853
854    KnownZero &= KZF(KnownZero2, ShiftAmt);
855    KnownOne  &= KOF(KnownOne2, ShiftAmt);
856  }
857
858  // If there are no compatible shift amounts, then we've proven that the shift
859  // amount must be >= the BitWidth, and the result is undefined. We could
860  // return anything we'd like, but we need to make sure the sets of known bits
861  // stay disjoint (it should be better for some other code to actually
862  // propagate the undef than to pick a value here using known bits).
863  if ((KnownZero & KnownOne) != 0) {
864    KnownZero.clearAllBits();
865    KnownOne.clearAllBits();
866  }
867}
868
869static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
870                                         APInt &KnownOne, unsigned Depth,
871                                         const Query &Q) {
872  unsigned BitWidth = KnownZero.getBitWidth();
873
874  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
875  switch (I->getOpcode()) {
876  default: break;
877  case Instruction::Load:
878    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
879      computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
880    break;
881  case Instruction::And: {
882    // If either the LHS or the RHS are Zero, the result is zero.
883    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
884    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
885
886    // Output known-1 bits are only known if set in both the LHS & RHS.
887    KnownOne &= KnownOne2;
888    // Output known-0 are known to be clear if zero in either the LHS | RHS.
889    KnownZero |= KnownZero2;
890
891    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
892    // here we handle the more general case of adding any odd number by
893    // matching the form add(x, add(x, y)) where y is odd.
894    // TODO: This could be generalized to clearing any bit set in y where the
895    // following bit is known to be unset in y.
896    Value *Y = nullptr;
897    if (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
898                                      m_Value(Y))) ||
899        match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
900                                      m_Value(Y)))) {
901      APInt KnownZero3(BitWidth, 0), KnownOne3(BitWidth, 0);
902      computeKnownBits(Y, KnownZero3, KnownOne3, Depth + 1, Q);
903      if (KnownOne3.countTrailingOnes() > 0)
904        KnownZero |= APInt::getLowBitsSet(BitWidth, 1);
905    }
906    break;
907  }
908  case Instruction::Or: {
909    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
910    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
911
912    // Output known-0 bits are only known if clear in both the LHS & RHS.
913    KnownZero &= KnownZero2;
914    // Output known-1 are known to be set if set in either the LHS | RHS.
915    KnownOne |= KnownOne2;
916    break;
917  }
918  case Instruction::Xor: {
919    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
920    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
921
922    // Output known-0 bits are known if clear or set in both the LHS & RHS.
923    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
924    // Output known-1 are known to be set if set in only one of the LHS, RHS.
925    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
926    KnownZero = KnownZeroOut;
927    break;
928  }
929  case Instruction::Mul: {
930    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
931    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
932                        KnownOne, KnownZero2, KnownOne2, Depth, Q);
933    break;
934  }
935  case Instruction::UDiv: {
936    // For the purposes of computing leading zeros we can conservatively
937    // treat a udiv as a logical right shift by the power of 2 known to
938    // be less than the denominator.
939    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
940    unsigned LeadZ = KnownZero2.countLeadingOnes();
941
942    KnownOne2.clearAllBits();
943    KnownZero2.clearAllBits();
944    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
945    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
946    if (RHSUnknownLeadingOnes != BitWidth)
947      LeadZ = std::min(BitWidth,
948                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
949
950    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
951    break;
952  }
953  case Instruction::Select:
954    computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
955    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
956
957    // Only known if known in both the LHS and RHS.
958    KnownOne &= KnownOne2;
959    KnownZero &= KnownZero2;
960    break;
961  case Instruction::FPTrunc:
962  case Instruction::FPExt:
963  case Instruction::FPToUI:
964  case Instruction::FPToSI:
965  case Instruction::SIToFP:
966  case Instruction::UIToFP:
967    break; // Can't work with floating point.
968  case Instruction::PtrToInt:
969  case Instruction::IntToPtr:
970  case Instruction::AddrSpaceCast: // Pointers could be different sizes.
971    // FALL THROUGH and handle them the same as zext/trunc.
972  case Instruction::ZExt:
973  case Instruction::Trunc: {
974    Type *SrcTy = I->getOperand(0)->getType();
975
976    unsigned SrcBitWidth;
977    // Note that we handle pointer operands here because of inttoptr/ptrtoint
978    // which fall through here.
979    SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
980
981    assert(SrcBitWidth && "SrcBitWidth can't be zero");
982    KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
983    KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
984    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
985    KnownZero = KnownZero.zextOrTrunc(BitWidth);
986    KnownOne = KnownOne.zextOrTrunc(BitWidth);
987    // Any top bits are known to be zero.
988    if (BitWidth > SrcBitWidth)
989      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
990    break;
991  }
992  case Instruction::BitCast: {
993    Type *SrcTy = I->getOperand(0)->getType();
994    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
995        // TODO: For now, not handling conversions like:
996        // (bitcast i64 %x to <2 x i32>)
997        !I->getType()->isVectorTy()) {
998      computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
999      break;
1000    }
1001    break;
1002  }
1003  case Instruction::SExt: {
1004    // Compute the bits in the result that are not present in the input.
1005    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1006
1007    KnownZero = KnownZero.trunc(SrcBitWidth);
1008    KnownOne = KnownOne.trunc(SrcBitWidth);
1009    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1010    KnownZero = KnownZero.zext(BitWidth);
1011    KnownOne = KnownOne.zext(BitWidth);
1012
1013    // If the sign bit of the input is known set or clear, then we know the
1014    // top bits of the result.
1015    if (KnownZero[SrcBitWidth-1])             // Input sign bit known zero
1016      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1017    else if (KnownOne[SrcBitWidth-1])           // Input sign bit known set
1018      KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1019    break;
1020  }
1021  case Instruction::Shl: {
1022    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1023    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1024      return (KnownZero << ShiftAmt) |
1025             APInt::getLowBitsSet(BitWidth, ShiftAmt); // Low bits known 0.
1026    };
1027
1028    auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1029      return KnownOne << ShiftAmt;
1030    };
1031
1032    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1033                                      KnownZero2, KnownOne2, Depth, Q, KZF,
1034                                      KOF);
1035    break;
1036  }
1037  case Instruction::LShr: {
1038    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1039    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1040      return APIntOps::lshr(KnownZero, ShiftAmt) |
1041             // High bits known zero.
1042             APInt::getHighBitsSet(BitWidth, ShiftAmt);
1043    };
1044
1045    auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1046      return APIntOps::lshr(KnownOne, ShiftAmt);
1047    };
1048
1049    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1050                                      KnownZero2, KnownOne2, Depth, Q, KZF,
1051                                      KOF);
1052    break;
1053  }
1054  case Instruction::AShr: {
1055    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1056    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1057      return APIntOps::ashr(KnownZero, ShiftAmt);
1058    };
1059
1060    auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1061      return APIntOps::ashr(KnownOne, ShiftAmt);
1062    };
1063
1064    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1065                                      KnownZero2, KnownOne2, Depth, Q, KZF,
1066                                      KOF);
1067    break;
1068  }
1069  case Instruction::Sub: {
1070    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1071    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1072                           KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1073                           Q);
1074    break;
1075  }
1076  case Instruction::Add: {
1077    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1078    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1079                           KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1080                           Q);
1081    break;
1082  }
1083  case Instruction::SRem:
1084    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1085      APInt RA = Rem->getValue().abs();
1086      if (RA.isPowerOf2()) {
1087        APInt LowBits = RA - 1;
1088        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1,
1089                         Q);
1090
1091        // The low bits of the first operand are unchanged by the srem.
1092        KnownZero = KnownZero2 & LowBits;
1093        KnownOne = KnownOne2 & LowBits;
1094
1095        // If the first operand is non-negative or has all low bits zero, then
1096        // the upper bits are all zero.
1097        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
1098          KnownZero |= ~LowBits;
1099
1100        // If the first operand is negative and not all low bits are zero, then
1101        // the upper bits are all one.
1102        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
1103          KnownOne |= ~LowBits;
1104
1105        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1106      }
1107    }
1108
1109    // The sign bit is the LHS's sign bit, except when the result of the
1110    // remainder is zero.
1111    if (KnownZero.isNonNegative()) {
1112      APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
1113      computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
1114                       Q);
1115      // If it's known zero, our sign bit is also zero.
1116      if (LHSKnownZero.isNegative())
1117        KnownZero.setBit(BitWidth - 1);
1118    }
1119
1120    break;
1121  case Instruction::URem: {
1122    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1123      const APInt &RA = Rem->getValue();
1124      if (RA.isPowerOf2()) {
1125        APInt LowBits = (RA - 1);
1126        computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1127        KnownZero |= ~LowBits;
1128        KnownOne &= LowBits;
1129        break;
1130      }
1131    }
1132
1133    // Since the result is less than or equal to either operand, any leading
1134    // zero bits in either operand must also exist in the result.
1135    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1136    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
1137
1138    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
1139                                KnownZero2.countLeadingOnes());
1140    KnownOne.clearAllBits();
1141    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
1142    break;
1143  }
1144
1145  case Instruction::Alloca: {
1146    AllocaInst *AI = cast<AllocaInst>(I);
1147    unsigned Align = AI->getAlignment();
1148    if (Align == 0)
1149      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1150
1151    if (Align > 0)
1152      KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1153    break;
1154  }
1155  case Instruction::GetElementPtr: {
1156    // Analyze all of the subscripts of this getelementptr instruction
1157    // to determine if we can prove known low zero bits.
1158    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
1159    computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, Depth + 1,
1160                     Q);
1161    unsigned TrailZ = LocalKnownZero.countTrailingOnes();
1162
1163    gep_type_iterator GTI = gep_type_begin(I);
1164    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1165      Value *Index = I->getOperand(i);
1166      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1167        // Handle struct member offset arithmetic.
1168
1169        // Handle case when index is vector zeroinitializer
1170        Constant *CIndex = cast<Constant>(Index);
1171        if (CIndex->isZeroValue())
1172          continue;
1173
1174        if (CIndex->getType()->isVectorTy())
1175          Index = CIndex->getSplatValue();
1176
1177        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1178        const StructLayout *SL = Q.DL.getStructLayout(STy);
1179        uint64_t Offset = SL->getElementOffset(Idx);
1180        TrailZ = std::min<unsigned>(TrailZ,
1181                                    countTrailingZeros(Offset));
1182      } else {
1183        // Handle array index arithmetic.
1184        Type *IndexedTy = GTI.getIndexedType();
1185        if (!IndexedTy->isSized()) {
1186          TrailZ = 0;
1187          break;
1188        }
1189        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1190        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1191        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
1192        computeKnownBits(Index, LocalKnownZero, LocalKnownOne, Depth + 1, Q);
1193        TrailZ = std::min(TrailZ,
1194                          unsigned(countTrailingZeros(TypeSize) +
1195                                   LocalKnownZero.countTrailingOnes()));
1196      }
1197    }
1198
1199    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
1200    break;
1201  }
1202  case Instruction::PHI: {
1203    PHINode *P = cast<PHINode>(I);
1204    // Handle the case of a simple two-predecessor recurrence PHI.
1205    // There's a lot more that could theoretically be done here, but
1206    // this is sufficient to catch some interesting cases.
1207    if (P->getNumIncomingValues() == 2) {
1208      for (unsigned i = 0; i != 2; ++i) {
1209        Value *L = P->getIncomingValue(i);
1210        Value *R = P->getIncomingValue(!i);
1211        Operator *LU = dyn_cast<Operator>(L);
1212        if (!LU)
1213          continue;
1214        unsigned Opcode = LU->getOpcode();
1215        // Check for operations that have the property that if
1216        // both their operands have low zero bits, the result
1217        // will have low zero bits.
1218        if (Opcode == Instruction::Add ||
1219            Opcode == Instruction::Sub ||
1220            Opcode == Instruction::And ||
1221            Opcode == Instruction::Or ||
1222            Opcode == Instruction::Mul) {
1223          Value *LL = LU->getOperand(0);
1224          Value *LR = LU->getOperand(1);
1225          // Find a recurrence.
1226          if (LL == I)
1227            L = LR;
1228          else if (LR == I)
1229            L = LL;
1230          else
1231            break;
1232          // Ok, we have a PHI of the form L op= R. Check for low
1233          // zero bits.
1234          computeKnownBits(R, KnownZero2, KnownOne2, Depth + 1, Q);
1235
1236          // We need to take the minimum number of known bits
1237          APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
1238          computeKnownBits(L, KnownZero3, KnownOne3, Depth + 1, Q);
1239
1240          KnownZero = APInt::getLowBitsSet(BitWidth,
1241                                           std::min(KnownZero2.countTrailingOnes(),
1242                                                    KnownZero3.countTrailingOnes()));
1243          break;
1244        }
1245      }
1246    }
1247
1248    // Unreachable blocks may have zero-operand PHI nodes.
1249    if (P->getNumIncomingValues() == 0)
1250      break;
1251
1252    // Otherwise take the unions of the known bit sets of the operands,
1253    // taking conservative care to avoid excessive recursion.
1254    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
1255      // Skip if every incoming value references to ourself.
1256      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1257        break;
1258
1259      KnownZero = APInt::getAllOnesValue(BitWidth);
1260      KnownOne = APInt::getAllOnesValue(BitWidth);
1261      for (Value *IncValue : P->incoming_values()) {
1262        // Skip direct self references.
1263        if (IncValue == P) continue;
1264
1265        KnownZero2 = APInt(BitWidth, 0);
1266        KnownOne2 = APInt(BitWidth, 0);
1267        // Recurse, but cap the recursion to one level, because we don't
1268        // want to waste time spinning around in loops.
1269        computeKnownBits(IncValue, KnownZero2, KnownOne2, MaxDepth - 1, Q);
1270        KnownZero &= KnownZero2;
1271        KnownOne &= KnownOne2;
1272        // If all bits have been ruled out, there's no need to check
1273        // more operands.
1274        if (!KnownZero && !KnownOne)
1275          break;
1276      }
1277    }
1278    break;
1279  }
1280  case Instruction::Call:
1281  case Instruction::Invoke:
1282    // If range metadata is attached to this call, set known bits from that,
1283    // and then intersect with known bits based on other properties of the
1284    // function.
1285    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
1286      computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
1287    if (Value *RV = CallSite(I).getReturnedArgOperand()) {
1288      computeKnownBits(RV, KnownZero2, KnownOne2, Depth + 1, Q);
1289      KnownZero |= KnownZero2;
1290      KnownOne |= KnownOne2;
1291    }
1292    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1293      switch (II->getIntrinsicID()) {
1294      default: break;
1295      case Intrinsic::bswap:
1296        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1297        KnownZero |= KnownZero2.byteSwap();
1298        KnownOne |= KnownOne2.byteSwap();
1299        break;
1300      case Intrinsic::ctlz:
1301      case Intrinsic::cttz: {
1302        unsigned LowBits = Log2_32(BitWidth)+1;
1303        // If this call is undefined for 0, the result will be less than 2^n.
1304        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1305          LowBits -= 1;
1306        KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1307        break;
1308      }
1309      case Intrinsic::ctpop: {
1310        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1311        // We can bound the space the count needs.  Also, bits known to be zero
1312        // can't contribute to the population.
1313        unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
1314        unsigned LeadingZeros =
1315          APInt(BitWidth, BitsPossiblySet).countLeadingZeros();
1316        assert(LeadingZeros <= BitWidth);
1317        KnownZero |= APInt::getHighBitsSet(BitWidth, LeadingZeros);
1318        KnownOne &= ~KnownZero;
1319        // TODO: we could bound KnownOne using the lower bound on the number
1320        // of bits which might be set provided by popcnt KnownOne2.
1321        break;
1322      }
1323      case Intrinsic::x86_sse42_crc32_64_64:
1324        KnownZero |= APInt::getHighBitsSet(64, 32);
1325        break;
1326      }
1327    }
1328    break;
1329  case Instruction::ExtractValue:
1330    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1331      ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1332      if (EVI->getNumIndices() != 1) break;
1333      if (EVI->getIndices()[0] == 0) {
1334        switch (II->getIntrinsicID()) {
1335        default: break;
1336        case Intrinsic::uadd_with_overflow:
1337        case Intrinsic::sadd_with_overflow:
1338          computeKnownBitsAddSub(true, II->getArgOperand(0),
1339                                 II->getArgOperand(1), false, KnownZero,
1340                                 KnownOne, KnownZero2, KnownOne2, Depth, Q);
1341          break;
1342        case Intrinsic::usub_with_overflow:
1343        case Intrinsic::ssub_with_overflow:
1344          computeKnownBitsAddSub(false, II->getArgOperand(0),
1345                                 II->getArgOperand(1), false, KnownZero,
1346                                 KnownOne, KnownZero2, KnownOne2, Depth, Q);
1347          break;
1348        case Intrinsic::umul_with_overflow:
1349        case Intrinsic::smul_with_overflow:
1350          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1351                              KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1352                              Q);
1353          break;
1354        }
1355      }
1356    }
1357  }
1358}
1359
1360/// Determine which bits of V are known to be either zero or one and return
1361/// them in the KnownZero/KnownOne bit sets.
1362///
1363/// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1364/// we cannot optimize based on the assumption that it is zero without changing
1365/// it to be an explicit zero.  If we don't change it to zero, other code could
1366/// optimized based on the contradictory assumption that it is non-zero.
1367/// Because instcombine aggressively folds operations with undef args anyway,
1368/// this won't lose us code quality.
1369///
1370/// This function is defined on values with integer type, values with pointer
1371/// type, and vectors of integers.  In the case
1372/// where V is a vector, known zero, and known one values are the
1373/// same width as the vector element, and the bit is set only if it is true
1374/// for all of the elements in the vector.
1375void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
1376                      unsigned Depth, const Query &Q) {
1377  assert(V && "No Value?");
1378  assert(Depth <= MaxDepth && "Limit Search Depth");
1379  unsigned BitWidth = KnownZero.getBitWidth();
1380
1381  assert((V->getType()->isIntOrIntVectorTy() ||
1382          V->getType()->getScalarType()->isPointerTy()) &&
1383         "Not integer or pointer type!");
1384  assert((Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
1385         (!V->getType()->isIntOrIntVectorTy() ||
1386          V->getType()->getScalarSizeInBits() == BitWidth) &&
1387         KnownZero.getBitWidth() == BitWidth &&
1388         KnownOne.getBitWidth() == BitWidth &&
1389         "V, KnownOne and KnownZero should have same BitWidth");
1390
1391  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
1392    // We know all of the bits for a constant!
1393    KnownOne = CI->getValue();
1394    KnownZero = ~KnownOne;
1395    return;
1396  }
1397  // Null and aggregate-zero are all-zeros.
1398  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1399    KnownOne.clearAllBits();
1400    KnownZero = APInt::getAllOnesValue(BitWidth);
1401    return;
1402  }
1403  // Handle a constant vector by taking the intersection of the known bits of
1404  // each element.
1405  if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1406    // We know that CDS must be a vector of integers. Take the intersection of
1407    // each element.
1408    KnownZero.setAllBits(); KnownOne.setAllBits();
1409    APInt Elt(KnownZero.getBitWidth(), 0);
1410    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1411      Elt = CDS->getElementAsInteger(i);
1412      KnownZero &= ~Elt;
1413      KnownOne &= Elt;
1414    }
1415    return;
1416  }
1417
1418  if (auto *CV = dyn_cast<ConstantVector>(V)) {
1419    // We know that CV must be a vector of integers. Take the intersection of
1420    // each element.
1421    KnownZero.setAllBits(); KnownOne.setAllBits();
1422    APInt Elt(KnownZero.getBitWidth(), 0);
1423    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1424      Constant *Element = CV->getAggregateElement(i);
1425      auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1426      if (!ElementCI) {
1427        KnownZero.clearAllBits();
1428        KnownOne.clearAllBits();
1429        return;
1430      }
1431      Elt = ElementCI->getValue();
1432      KnownZero &= ~Elt;
1433      KnownOne &= Elt;
1434    }
1435    return;
1436  }
1437
1438  // Start out not knowing anything.
1439  KnownZero.clearAllBits(); KnownOne.clearAllBits();
1440
1441  // Limit search depth.
1442  // All recursive calls that increase depth must come after this.
1443  if (Depth == MaxDepth)
1444    return;
1445
1446  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1447  // the bits of its aliasee.
1448  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1449    if (!GA->isInterposable())
1450      computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, Depth + 1, Q);
1451    return;
1452  }
1453
1454  if (Operator *I = dyn_cast<Operator>(V))
1455    computeKnownBitsFromOperator(I, KnownZero, KnownOne, Depth, Q);
1456
1457  // Aligned pointers have trailing zeros - refine KnownZero set
1458  if (V->getType()->isPointerTy()) {
1459    unsigned Align = V->getPointerAlignment(Q.DL);
1460    if (Align)
1461      KnownZero |= APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1462  }
1463
1464  // computeKnownBitsFromAssume strictly refines KnownZero and
1465  // KnownOne. Therefore, we run them after computeKnownBitsFromOperator.
1466
1467  // Check whether a nearby assume intrinsic can determine some known bits.
1468  computeKnownBitsFromAssume(V, KnownZero, KnownOne, Depth, Q);
1469
1470  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1471}
1472
1473/// Determine whether the sign bit is known to be zero or one.
1474/// Convenience wrapper around computeKnownBits.
1475void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
1476                    unsigned Depth, const Query &Q) {
1477  unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
1478  if (!BitWidth) {
1479    KnownZero = false;
1480    KnownOne = false;
1481    return;
1482  }
1483  APInt ZeroBits(BitWidth, 0);
1484  APInt OneBits(BitWidth, 0);
1485  computeKnownBits(V, ZeroBits, OneBits, Depth, Q);
1486  KnownOne = OneBits[BitWidth - 1];
1487  KnownZero = ZeroBits[BitWidth - 1];
1488}
1489
1490/// Return true if the given value is known to have exactly one
1491/// bit set when defined. For vectors return true if every element is known to
1492/// be a power of two when defined. Supports values with integer or pointer
1493/// types and vectors of integers.
1494bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
1495                            const Query &Q) {
1496  if (Constant *C = dyn_cast<Constant>(V)) {
1497    if (C->isNullValue())
1498      return OrZero;
1499
1500    const APInt *ConstIntOrConstSplatInt;
1501    if (match(C, m_APInt(ConstIntOrConstSplatInt)))
1502      return ConstIntOrConstSplatInt->isPowerOf2();
1503  }
1504
1505  // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1506  // it is shifted off the end then the result is undefined.
1507  if (match(V, m_Shl(m_One(), m_Value())))
1508    return true;
1509
1510  // (signbit) >>l X is clearly a power of two if the one is not shifted off the
1511  // bottom.  If it is shifted off the bottom then the result is undefined.
1512  if (match(V, m_LShr(m_SignBit(), m_Value())))
1513    return true;
1514
1515  // The remaining tests are all recursive, so bail out if we hit the limit.
1516  if (Depth++ == MaxDepth)
1517    return false;
1518
1519  Value *X = nullptr, *Y = nullptr;
1520  // A shift left or a logical shift right of a power of two is a power of two
1521  // or zero.
1522  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1523                 match(V, m_LShr(m_Value(X), m_Value()))))
1524    return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1525
1526  if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1527    return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1528
1529  if (SelectInst *SI = dyn_cast<SelectInst>(V))
1530    return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1531           isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1532
1533  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1534    // A power of two and'd with anything is a power of two or zero.
1535    if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1536        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1537      return true;
1538    // X & (-X) is always a power of two or zero.
1539    if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1540      return true;
1541    return false;
1542  }
1543
1544  // Adding a power-of-two or zero to the same power-of-two or zero yields
1545  // either the original power-of-two, a larger power-of-two or zero.
1546  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1547    OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1548    if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1549      if (match(X, m_And(m_Specific(Y), m_Value())) ||
1550          match(X, m_And(m_Value(), m_Specific(Y))))
1551        if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1552          return true;
1553      if (match(Y, m_And(m_Specific(X), m_Value())) ||
1554          match(Y, m_And(m_Value(), m_Specific(X))))
1555        if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1556          return true;
1557
1558      unsigned BitWidth = V->getType()->getScalarSizeInBits();
1559      APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
1560      computeKnownBits(X, LHSZeroBits, LHSOneBits, Depth, Q);
1561
1562      APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
1563      computeKnownBits(Y, RHSZeroBits, RHSOneBits, Depth, Q);
1564      // If i8 V is a power of two or zero:
1565      //  ZeroBits: 1 1 1 0 1 1 1 1
1566      // ~ZeroBits: 0 0 0 1 0 0 0 0
1567      if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
1568        // If OrZero isn't set, we cannot give back a zero result.
1569        // Make sure either the LHS or RHS has a bit set.
1570        if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
1571          return true;
1572    }
1573  }
1574
1575  // An exact divide or right shift can only shift off zero bits, so the result
1576  // is a power of two only if the first operand is a power of two and not
1577  // copying a sign bit (sdiv int_min, 2).
1578  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1579      match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1580    return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1581                                  Depth, Q);
1582  }
1583
1584  return false;
1585}
1586
1587/// \brief Test whether a GEP's result is known to be non-null.
1588///
1589/// Uses properties inherent in a GEP to try to determine whether it is known
1590/// to be non-null.
1591///
1592/// Currently this routine does not support vector GEPs.
1593static bool isGEPKnownNonNull(GEPOperator *GEP, unsigned Depth,
1594                              const Query &Q) {
1595  if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1596    return false;
1597
1598  // FIXME: Support vector-GEPs.
1599  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1600
1601  // If the base pointer is non-null, we cannot walk to a null address with an
1602  // inbounds GEP in address space zero.
1603  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1604    return true;
1605
1606  // Walk the GEP operands and see if any operand introduces a non-zero offset.
1607  // If so, then the GEP cannot produce a null pointer, as doing so would
1608  // inherently violate the inbounds contract within address space zero.
1609  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1610       GTI != GTE; ++GTI) {
1611    // Struct types are easy -- they must always be indexed by a constant.
1612    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1613      ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1614      unsigned ElementIdx = OpC->getZExtValue();
1615      const StructLayout *SL = Q.DL.getStructLayout(STy);
1616      uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1617      if (ElementOffset > 0)
1618        return true;
1619      continue;
1620    }
1621
1622    // If we have a zero-sized type, the index doesn't matter. Keep looping.
1623    if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1624      continue;
1625
1626    // Fast path the constant operand case both for efficiency and so we don't
1627    // increment Depth when just zipping down an all-constant GEP.
1628    if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1629      if (!OpC->isZero())
1630        return true;
1631      continue;
1632    }
1633
1634    // We post-increment Depth here because while isKnownNonZero increments it
1635    // as well, when we pop back up that increment won't persist. We don't want
1636    // to recurse 10k times just because we have 10k GEP operands. We don't
1637    // bail completely out because we want to handle constant GEPs regardless
1638    // of depth.
1639    if (Depth++ >= MaxDepth)
1640      continue;
1641
1642    if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1643      return true;
1644  }
1645
1646  return false;
1647}
1648
1649/// Does the 'Range' metadata (which must be a valid MD_range operand list)
1650/// ensure that the value it's attached to is never Value?  'RangeType' is
1651/// is the type of the value described by the range.
1652static bool rangeMetadataExcludesValue(MDNode* Ranges, const APInt& Value) {
1653  const unsigned NumRanges = Ranges->getNumOperands() / 2;
1654  assert(NumRanges >= 1);
1655  for (unsigned i = 0; i < NumRanges; ++i) {
1656    ConstantInt *Lower =
1657        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1658    ConstantInt *Upper =
1659        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1660    ConstantRange Range(Lower->getValue(), Upper->getValue());
1661    if (Range.contains(Value))
1662      return false;
1663  }
1664  return true;
1665}
1666
1667/// Return true if the given value is known to be non-zero when defined.
1668/// For vectors return true if every element is known to be non-zero when
1669/// defined. Supports values with integer or pointer type and vectors of
1670/// integers.
1671bool isKnownNonZero(Value *V, unsigned Depth, const Query &Q) {
1672  if (auto *C = dyn_cast<Constant>(V)) {
1673    if (C->isNullValue())
1674      return false;
1675    if (isa<ConstantInt>(C))
1676      // Must be non-zero due to null test above.
1677      return true;
1678
1679    // For constant vectors, check that all elements are undefined or known
1680    // non-zero to determine that the whole vector is known non-zero.
1681    if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1682      for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1683        Constant *Elt = C->getAggregateElement(i);
1684        if (!Elt || Elt->isNullValue())
1685          return false;
1686        if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1687          return false;
1688      }
1689      return true;
1690    }
1691
1692    return false;
1693  }
1694
1695  if (auto *I = dyn_cast<Instruction>(V)) {
1696    if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1697      // If the possible ranges don't contain zero, then the value is
1698      // definitely non-zero.
1699      if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1700        const APInt ZeroValue(Ty->getBitWidth(), 0);
1701        if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1702          return true;
1703      }
1704    }
1705  }
1706
1707  // The remaining tests are all recursive, so bail out if we hit the limit.
1708  if (Depth++ >= MaxDepth)
1709    return false;
1710
1711  // Check for pointer simplifications.
1712  if (V->getType()->isPointerTy()) {
1713    if (isKnownNonNull(V))
1714      return true;
1715    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1716      if (isGEPKnownNonNull(GEP, Depth, Q))
1717        return true;
1718  }
1719
1720  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1721
1722  // X | Y != 0 if X != 0 or Y != 0.
1723  Value *X = nullptr, *Y = nullptr;
1724  if (match(V, m_Or(m_Value(X), m_Value(Y))))
1725    return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1726
1727  // ext X != 0 if X != 0.
1728  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1729    return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1730
1731  // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
1732  // if the lowest bit is shifted off the end.
1733  if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1734    // shl nuw can't remove any non-zero bits.
1735    OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1736    if (BO->hasNoUnsignedWrap())
1737      return isKnownNonZero(X, Depth, Q);
1738
1739    APInt KnownZero(BitWidth, 0);
1740    APInt KnownOne(BitWidth, 0);
1741    computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1742    if (KnownOne[0])
1743      return true;
1744  }
1745  // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
1746  // defined if the sign bit is shifted off the end.
1747  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1748    // shr exact can only shift out zero bits.
1749    PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1750    if (BO->isExact())
1751      return isKnownNonZero(X, Depth, Q);
1752
1753    bool XKnownNonNegative, XKnownNegative;
1754    ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1755    if (XKnownNegative)
1756      return true;
1757
1758    // If the shifter operand is a constant, and all of the bits shifted
1759    // out are known to be zero, and X is known non-zero then at least one
1760    // non-zero bit must remain.
1761    if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1762      APInt KnownZero(BitWidth, 0);
1763      APInt KnownOne(BitWidth, 0);
1764      computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1765
1766      auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1767      // Is there a known one in the portion not shifted out?
1768      if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal)
1769        return true;
1770      // Are all the bits to be shifted out known zero?
1771      if (KnownZero.countTrailingOnes() >= ShiftVal)
1772        return isKnownNonZero(X, Depth, Q);
1773    }
1774  }
1775  // div exact can only produce a zero if the dividend is zero.
1776  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1777    return isKnownNonZero(X, Depth, Q);
1778  }
1779  // X + Y.
1780  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1781    bool XKnownNonNegative, XKnownNegative;
1782    bool YKnownNonNegative, YKnownNegative;
1783    ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1784    ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Depth, Q);
1785
1786    // If X and Y are both non-negative (as signed values) then their sum is not
1787    // zero unless both X and Y are zero.
1788    if (XKnownNonNegative && YKnownNonNegative)
1789      if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
1790        return true;
1791
1792    // If X and Y are both negative (as signed values) then their sum is not
1793    // zero unless both X and Y equal INT_MIN.
1794    if (BitWidth && XKnownNegative && YKnownNegative) {
1795      APInt KnownZero(BitWidth, 0);
1796      APInt KnownOne(BitWidth, 0);
1797      APInt Mask = APInt::getSignedMaxValue(BitWidth);
1798      // The sign bit of X is set.  If some other bit is set then X is not equal
1799      // to INT_MIN.
1800      computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1801      if ((KnownOne & Mask) != 0)
1802        return true;
1803      // The sign bit of Y is set.  If some other bit is set then Y is not equal
1804      // to INT_MIN.
1805      computeKnownBits(Y, KnownZero, KnownOne, Depth, Q);
1806      if ((KnownOne & Mask) != 0)
1807        return true;
1808    }
1809
1810    // The sum of a non-negative number and a power of two is not zero.
1811    if (XKnownNonNegative &&
1812        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
1813      return true;
1814    if (YKnownNonNegative &&
1815        isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
1816      return true;
1817  }
1818  // X * Y.
1819  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
1820    OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1821    // If X and Y are non-zero then so is X * Y as long as the multiplication
1822    // does not overflow.
1823    if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
1824        isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
1825      return true;
1826  }
1827  // (C ? X : Y) != 0 if X != 0 and Y != 0.
1828  else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1829    if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
1830        isKnownNonZero(SI->getFalseValue(), Depth, Q))
1831      return true;
1832  }
1833  // PHI
1834  else if (PHINode *PN = dyn_cast<PHINode>(V)) {
1835    // Try and detect a recurrence that monotonically increases from a
1836    // starting value, as these are common as induction variables.
1837    if (PN->getNumIncomingValues() == 2) {
1838      Value *Start = PN->getIncomingValue(0);
1839      Value *Induction = PN->getIncomingValue(1);
1840      if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
1841        std::swap(Start, Induction);
1842      if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
1843        if (!C->isZero() && !C->isNegative()) {
1844          ConstantInt *X;
1845          if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
1846               match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
1847              !X->isNegative())
1848            return true;
1849        }
1850      }
1851    }
1852    // Check if all incoming values are non-zero constant.
1853    bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) {
1854      return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZeroValue();
1855    });
1856    if (AllNonZeroConstants)
1857      return true;
1858  }
1859
1860  if (!BitWidth) return false;
1861  APInt KnownZero(BitWidth, 0);
1862  APInt KnownOne(BitWidth, 0);
1863  computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
1864  return KnownOne != 0;
1865}
1866
1867/// Return true if V2 == V1 + X, where X is known non-zero.
1868static bool isAddOfNonZero(Value *V1, Value *V2, const Query &Q) {
1869  BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
1870  if (!BO || BO->getOpcode() != Instruction::Add)
1871    return false;
1872  Value *Op = nullptr;
1873  if (V2 == BO->getOperand(0))
1874    Op = BO->getOperand(1);
1875  else if (V2 == BO->getOperand(1))
1876    Op = BO->getOperand(0);
1877  else
1878    return false;
1879  return isKnownNonZero(Op, 0, Q);
1880}
1881
1882/// Return true if it is known that V1 != V2.
1883static bool isKnownNonEqual(Value *V1, Value *V2, const Query &Q) {
1884  if (V1->getType()->isVectorTy() || V1 == V2)
1885    return false;
1886  if (V1->getType() != V2->getType())
1887    // We can't look through casts yet.
1888    return false;
1889  if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
1890    return true;
1891
1892  if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) {
1893    // Are any known bits in V1 contradictory to known bits in V2? If V1
1894    // has a known zero where V2 has a known one, they must not be equal.
1895    auto BitWidth = Ty->getBitWidth();
1896    APInt KnownZero1(BitWidth, 0);
1897    APInt KnownOne1(BitWidth, 0);
1898    computeKnownBits(V1, KnownZero1, KnownOne1, 0, Q);
1899    APInt KnownZero2(BitWidth, 0);
1900    APInt KnownOne2(BitWidth, 0);
1901    computeKnownBits(V2, KnownZero2, KnownOne2, 0, Q);
1902
1903    auto OppositeBits = (KnownZero1 & KnownOne2) | (KnownZero2 & KnownOne1);
1904    if (OppositeBits.getBoolValue())
1905      return true;
1906  }
1907  return false;
1908}
1909
1910/// Return true if 'V & Mask' is known to be zero.  We use this predicate to
1911/// simplify operations downstream. Mask is known to be zero for bits that V
1912/// cannot have.
1913///
1914/// This function is defined on values with integer type, values with pointer
1915/// type, and vectors of integers.  In the case
1916/// where V is a vector, the mask, known zero, and known one values are the
1917/// same width as the vector element, and the bit is set only if it is true
1918/// for all of the elements in the vector.
1919bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth,
1920                       const Query &Q) {
1921  APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
1922  computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
1923  return (KnownZero & Mask) == Mask;
1924}
1925
1926/// For vector constants, loop over the elements and find the constant with the
1927/// minimum number of sign bits. Return 0 if the value is not a vector constant
1928/// or if any element was not analyzed; otherwise, return the count for the
1929/// element with the minimum number of sign bits.
1930static unsigned computeNumSignBitsVectorConstant(Value *V, unsigned TyBits) {
1931  auto *CV = dyn_cast<Constant>(V);
1932  if (!CV || !CV->getType()->isVectorTy())
1933    return 0;
1934
1935  unsigned MinSignBits = TyBits;
1936  unsigned NumElts = CV->getType()->getVectorNumElements();
1937  for (unsigned i = 0; i != NumElts; ++i) {
1938    // If we find a non-ConstantInt, bail out.
1939    auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
1940    if (!Elt)
1941      return 0;
1942
1943    // If the sign bit is 1, flip the bits, so we always count leading zeros.
1944    APInt EltVal = Elt->getValue();
1945    if (EltVal.isNegative())
1946      EltVal = ~EltVal;
1947    MinSignBits = std::min(MinSignBits, EltVal.countLeadingZeros());
1948  }
1949
1950  return MinSignBits;
1951}
1952
1953/// Return the number of times the sign bit of the register is replicated into
1954/// the other bits. We know that at least 1 bit is always equal to the sign bit
1955/// (itself), but other cases can give us information. For example, immediately
1956/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
1957/// other, so we return 3. For vectors, return the number of sign bits for the
1958/// vector element with the mininum number of known sign bits.
1959unsigned ComputeNumSignBits(Value *V, unsigned Depth, const Query &Q) {
1960  unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
1961  unsigned Tmp, Tmp2;
1962  unsigned FirstAnswer = 1;
1963
1964  // Note that ConstantInt is handled by the general computeKnownBits case
1965  // below.
1966
1967  if (Depth == 6)
1968    return 1;  // Limit search depth.
1969
1970  Operator *U = dyn_cast<Operator>(V);
1971  switch (Operator::getOpcode(V)) {
1972  default: break;
1973  case Instruction::SExt:
1974    Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
1975    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
1976
1977  case Instruction::SDiv: {
1978    const APInt *Denominator;
1979    // sdiv X, C -> adds log(C) sign bits.
1980    if (match(U->getOperand(1), m_APInt(Denominator))) {
1981
1982      // Ignore non-positive denominator.
1983      if (!Denominator->isStrictlyPositive())
1984        break;
1985
1986      // Calculate the incoming numerator bits.
1987      unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
1988
1989      // Add floor(log(C)) bits to the numerator bits.
1990      return std::min(TyBits, NumBits + Denominator->logBase2());
1991    }
1992    break;
1993  }
1994
1995  case Instruction::SRem: {
1996    const APInt *Denominator;
1997    // srem X, C -> we know that the result is within [-C+1,C) when C is a
1998    // positive constant.  This let us put a lower bound on the number of sign
1999    // bits.
2000    if (match(U->getOperand(1), m_APInt(Denominator))) {
2001
2002      // Ignore non-positive denominator.
2003      if (!Denominator->isStrictlyPositive())
2004        break;
2005
2006      // Calculate the incoming numerator bits. SRem by a positive constant
2007      // can't lower the number of sign bits.
2008      unsigned NumrBits =
2009          ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2010
2011      // Calculate the leading sign bit constraints by examining the
2012      // denominator.  Given that the denominator is positive, there are two
2013      // cases:
2014      //
2015      //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
2016      //     (1 << ceilLogBase2(C)).
2017      //
2018      //  2. the numerator is negative.  Then the result range is (-C,0] and
2019      //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2020      //
2021      // Thus a lower bound on the number of sign bits is `TyBits -
2022      // ceilLogBase2(C)`.
2023
2024      unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2025      return std::max(NumrBits, ResBits);
2026    }
2027    break;
2028  }
2029
2030  case Instruction::AShr: {
2031    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2032    // ashr X, C   -> adds C sign bits.  Vectors too.
2033    const APInt *ShAmt;
2034    if (match(U->getOperand(1), m_APInt(ShAmt))) {
2035      Tmp += ShAmt->getZExtValue();
2036      if (Tmp > TyBits) Tmp = TyBits;
2037    }
2038    return Tmp;
2039  }
2040  case Instruction::Shl: {
2041    const APInt *ShAmt;
2042    if (match(U->getOperand(1), m_APInt(ShAmt))) {
2043      // shl destroys sign bits.
2044      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2045      Tmp2 = ShAmt->getZExtValue();
2046      if (Tmp2 >= TyBits ||      // Bad shift.
2047          Tmp2 >= Tmp) break;    // Shifted all sign bits out.
2048      return Tmp - Tmp2;
2049    }
2050    break;
2051  }
2052  case Instruction::And:
2053  case Instruction::Or:
2054  case Instruction::Xor:    // NOT is handled here.
2055    // Logical binary ops preserve the number of sign bits at the worst.
2056    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2057    if (Tmp != 1) {
2058      Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2059      FirstAnswer = std::min(Tmp, Tmp2);
2060      // We computed what we know about the sign bits as our first
2061      // answer. Now proceed to the generic code that uses
2062      // computeKnownBits, and pick whichever answer is better.
2063    }
2064    break;
2065
2066  case Instruction::Select:
2067    Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2068    if (Tmp == 1) return 1;  // Early out.
2069    Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2070    return std::min(Tmp, Tmp2);
2071
2072  case Instruction::Add:
2073    // Add can have at most one carry bit.  Thus we know that the output
2074    // is, at worst, one more bit than the inputs.
2075    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2076    if (Tmp == 1) return 1;  // Early out.
2077
2078    // Special case decrementing a value (ADD X, -1):
2079    if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2080      if (CRHS->isAllOnesValue()) {
2081        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2082        computeKnownBits(U->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
2083
2084        // If the input is known to be 0 or 1, the output is 0/-1, which is all
2085        // sign bits set.
2086        if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2087          return TyBits;
2088
2089        // If we are subtracting one from a positive number, there is no carry
2090        // out of the result.
2091        if (KnownZero.isNegative())
2092          return Tmp;
2093      }
2094
2095    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2096    if (Tmp2 == 1) return 1;
2097    return std::min(Tmp, Tmp2)-1;
2098
2099  case Instruction::Sub:
2100    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2101    if (Tmp2 == 1) return 1;
2102
2103    // Handle NEG.
2104    if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2105      if (CLHS->isNullValue()) {
2106        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2107        computeKnownBits(U->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
2108        // If the input is known to be 0 or 1, the output is 0/-1, which is all
2109        // sign bits set.
2110        if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2111          return TyBits;
2112
2113        // If the input is known to be positive (the sign bit is known clear),
2114        // the output of the NEG has the same number of sign bits as the input.
2115        if (KnownZero.isNegative())
2116          return Tmp2;
2117
2118        // Otherwise, we treat this like a SUB.
2119      }
2120
2121    // Sub can have at most one carry bit.  Thus we know that the output
2122    // is, at worst, one more bit than the inputs.
2123    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2124    if (Tmp == 1) return 1;  // Early out.
2125    return std::min(Tmp, Tmp2)-1;
2126
2127  case Instruction::PHI: {
2128    PHINode *PN = cast<PHINode>(U);
2129    unsigned NumIncomingValues = PN->getNumIncomingValues();
2130    // Don't analyze large in-degree PHIs.
2131    if (NumIncomingValues > 4) break;
2132    // Unreachable blocks may have zero-operand PHI nodes.
2133    if (NumIncomingValues == 0) break;
2134
2135    // Take the minimum of all incoming values.  This can't infinitely loop
2136    // because of our depth threshold.
2137    Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2138    for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2139      if (Tmp == 1) return Tmp;
2140      Tmp = std::min(
2141          Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2142    }
2143    return Tmp;
2144  }
2145
2146  case Instruction::Trunc:
2147    // FIXME: it's tricky to do anything useful for this, but it is an important
2148    // case for targets like X86.
2149    break;
2150  }
2151
2152  // Finally, if we can prove that the top bits of the result are 0's or 1's,
2153  // use this information.
2154
2155  // If we can examine all elements of a vector constant successfully, we're
2156  // done (we can't do any better than that). If not, keep trying.
2157  if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2158    return VecSignBits;
2159
2160  APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2161  computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
2162
2163  // If we know that the sign bit is either zero or one, determine the number of
2164  // identical bits in the top of the input value.
2165  if (KnownZero.isNegative())
2166    return std::max(FirstAnswer, KnownZero.countLeadingOnes());
2167
2168  if (KnownOne.isNegative())
2169    return std::max(FirstAnswer, KnownOne.countLeadingOnes());
2170
2171  // computeKnownBits gave us no extra information about the top bits.
2172  return FirstAnswer;
2173}
2174
2175/// This function computes the integer multiple of Base that equals V.
2176/// If successful, it returns true and returns the multiple in
2177/// Multiple. If unsuccessful, it returns false. It looks
2178/// through SExt instructions only if LookThroughSExt is true.
2179bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2180                           bool LookThroughSExt, unsigned Depth) {
2181  const unsigned MaxDepth = 6;
2182
2183  assert(V && "No Value?");
2184  assert(Depth <= MaxDepth && "Limit Search Depth");
2185  assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2186
2187  Type *T = V->getType();
2188
2189  ConstantInt *CI = dyn_cast<ConstantInt>(V);
2190
2191  if (Base == 0)
2192    return false;
2193
2194  if (Base == 1) {
2195    Multiple = V;
2196    return true;
2197  }
2198
2199  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2200  Constant *BaseVal = ConstantInt::get(T, Base);
2201  if (CO && CO == BaseVal) {
2202    // Multiple is 1.
2203    Multiple = ConstantInt::get(T, 1);
2204    return true;
2205  }
2206
2207  if (CI && CI->getZExtValue() % Base == 0) {
2208    Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2209    return true;
2210  }
2211
2212  if (Depth == MaxDepth) return false;  // Limit search depth.
2213
2214  Operator *I = dyn_cast<Operator>(V);
2215  if (!I) return false;
2216
2217  switch (I->getOpcode()) {
2218  default: break;
2219  case Instruction::SExt:
2220    if (!LookThroughSExt) return false;
2221    // otherwise fall through to ZExt
2222  case Instruction::ZExt:
2223    return ComputeMultiple(I->getOperand(0), Base, Multiple,
2224                           LookThroughSExt, Depth+1);
2225  case Instruction::Shl:
2226  case Instruction::Mul: {
2227    Value *Op0 = I->getOperand(0);
2228    Value *Op1 = I->getOperand(1);
2229
2230    if (I->getOpcode() == Instruction::Shl) {
2231      ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2232      if (!Op1CI) return false;
2233      // Turn Op0 << Op1 into Op0 * 2^Op1
2234      APInt Op1Int = Op1CI->getValue();
2235      uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2236      APInt API(Op1Int.getBitWidth(), 0);
2237      API.setBit(BitToSet);
2238      Op1 = ConstantInt::get(V->getContext(), API);
2239    }
2240
2241    Value *Mul0 = nullptr;
2242    if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2243      if (Constant *Op1C = dyn_cast<Constant>(Op1))
2244        if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2245          if (Op1C->getType()->getPrimitiveSizeInBits() <
2246              MulC->getType()->getPrimitiveSizeInBits())
2247            Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2248          if (Op1C->getType()->getPrimitiveSizeInBits() >
2249              MulC->getType()->getPrimitiveSizeInBits())
2250            MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2251
2252          // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2253          Multiple = ConstantExpr::getMul(MulC, Op1C);
2254          return true;
2255        }
2256
2257      if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2258        if (Mul0CI->getValue() == 1) {
2259          // V == Base * Op1, so return Op1
2260          Multiple = Op1;
2261          return true;
2262        }
2263    }
2264
2265    Value *Mul1 = nullptr;
2266    if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2267      if (Constant *Op0C = dyn_cast<Constant>(Op0))
2268        if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2269          if (Op0C->getType()->getPrimitiveSizeInBits() <
2270              MulC->getType()->getPrimitiveSizeInBits())
2271            Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2272          if (Op0C->getType()->getPrimitiveSizeInBits() >
2273              MulC->getType()->getPrimitiveSizeInBits())
2274            MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2275
2276          // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2277          Multiple = ConstantExpr::getMul(MulC, Op0C);
2278          return true;
2279        }
2280
2281      if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2282        if (Mul1CI->getValue() == 1) {
2283          // V == Base * Op0, so return Op0
2284          Multiple = Op0;
2285          return true;
2286        }
2287    }
2288  }
2289  }
2290
2291  // We could not determine if V is a multiple of Base.
2292  return false;
2293}
2294
2295Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2296                                            const TargetLibraryInfo *TLI) {
2297  const Function *F = ICS.getCalledFunction();
2298  if (!F)
2299    return Intrinsic::not_intrinsic;
2300
2301  if (F->isIntrinsic())
2302    return F->getIntrinsicID();
2303
2304  if (!TLI)
2305    return Intrinsic::not_intrinsic;
2306
2307  LibFunc::Func Func;
2308  // We're going to make assumptions on the semantics of the functions, check
2309  // that the target knows that it's available in this environment and it does
2310  // not have local linkage.
2311  if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2312    return Intrinsic::not_intrinsic;
2313
2314  if (!ICS.onlyReadsMemory())
2315    return Intrinsic::not_intrinsic;
2316
2317  // Otherwise check if we have a call to a function that can be turned into a
2318  // vector intrinsic.
2319  switch (Func) {
2320  default:
2321    break;
2322  case LibFunc::sin:
2323  case LibFunc::sinf:
2324  case LibFunc::sinl:
2325    return Intrinsic::sin;
2326  case LibFunc::cos:
2327  case LibFunc::cosf:
2328  case LibFunc::cosl:
2329    return Intrinsic::cos;
2330  case LibFunc::exp:
2331  case LibFunc::expf:
2332  case LibFunc::expl:
2333    return Intrinsic::exp;
2334  case LibFunc::exp2:
2335  case LibFunc::exp2f:
2336  case LibFunc::exp2l:
2337    return Intrinsic::exp2;
2338  case LibFunc::log:
2339  case LibFunc::logf:
2340  case LibFunc::logl:
2341    return Intrinsic::log;
2342  case LibFunc::log10:
2343  case LibFunc::log10f:
2344  case LibFunc::log10l:
2345    return Intrinsic::log10;
2346  case LibFunc::log2:
2347  case LibFunc::log2f:
2348  case LibFunc::log2l:
2349    return Intrinsic::log2;
2350  case LibFunc::fabs:
2351  case LibFunc::fabsf:
2352  case LibFunc::fabsl:
2353    return Intrinsic::fabs;
2354  case LibFunc::fmin:
2355  case LibFunc::fminf:
2356  case LibFunc::fminl:
2357    return Intrinsic::minnum;
2358  case LibFunc::fmax:
2359  case LibFunc::fmaxf:
2360  case LibFunc::fmaxl:
2361    return Intrinsic::maxnum;
2362  case LibFunc::copysign:
2363  case LibFunc::copysignf:
2364  case LibFunc::copysignl:
2365    return Intrinsic::copysign;
2366  case LibFunc::floor:
2367  case LibFunc::floorf:
2368  case LibFunc::floorl:
2369    return Intrinsic::floor;
2370  case LibFunc::ceil:
2371  case LibFunc::ceilf:
2372  case LibFunc::ceill:
2373    return Intrinsic::ceil;
2374  case LibFunc::trunc:
2375  case LibFunc::truncf:
2376  case LibFunc::truncl:
2377    return Intrinsic::trunc;
2378  case LibFunc::rint:
2379  case LibFunc::rintf:
2380  case LibFunc::rintl:
2381    return Intrinsic::rint;
2382  case LibFunc::nearbyint:
2383  case LibFunc::nearbyintf:
2384  case LibFunc::nearbyintl:
2385    return Intrinsic::nearbyint;
2386  case LibFunc::round:
2387  case LibFunc::roundf:
2388  case LibFunc::roundl:
2389    return Intrinsic::round;
2390  case LibFunc::pow:
2391  case LibFunc::powf:
2392  case LibFunc::powl:
2393    return Intrinsic::pow;
2394  case LibFunc::sqrt:
2395  case LibFunc::sqrtf:
2396  case LibFunc::sqrtl:
2397    if (ICS->hasNoNaNs())
2398      return Intrinsic::sqrt;
2399    return Intrinsic::not_intrinsic;
2400  }
2401
2402  return Intrinsic::not_intrinsic;
2403}
2404
2405/// Return true if we can prove that the specified FP value is never equal to
2406/// -0.0.
2407///
2408/// NOTE: this function will need to be revisited when we support non-default
2409/// rounding modes!
2410///
2411bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2412                                unsigned Depth) {
2413  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2414    return !CFP->getValueAPF().isNegZero();
2415
2416  // FIXME: Magic number! At the least, this should be given a name because it's
2417  // used similarly in CannotBeOrderedLessThanZero(). A better fix may be to
2418  // expose it as a parameter, so it can be used for testing / experimenting.
2419  if (Depth == 6)
2420    return false;  // Limit search depth.
2421
2422  const Operator *I = dyn_cast<Operator>(V);
2423  if (!I) return false;
2424
2425  // Check if the nsz fast-math flag is set
2426  if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
2427    if (FPO->hasNoSignedZeros())
2428      return true;
2429
2430  // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
2431  if (I->getOpcode() == Instruction::FAdd)
2432    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
2433      if (CFP->isNullValue())
2434        return true;
2435
2436  // sitofp and uitofp turn into +0.0 for zero.
2437  if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
2438    return true;
2439
2440  if (const CallInst *CI = dyn_cast<CallInst>(I)) {
2441    Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2442    switch (IID) {
2443    default:
2444      break;
2445    // sqrt(-0.0) = -0.0, no other negative results are possible.
2446    case Intrinsic::sqrt:
2447      return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1);
2448    // fabs(x) != -0.0
2449    case Intrinsic::fabs:
2450      return true;
2451    }
2452  }
2453
2454  return false;
2455}
2456
2457bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2458                                       const TargetLibraryInfo *TLI,
2459                                       unsigned Depth) {
2460  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2461    return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();
2462
2463  // FIXME: Magic number! At the least, this should be given a name because it's
2464  // used similarly in CannotBeNegativeZero(). A better fix may be to
2465  // expose it as a parameter, so it can be used for testing / experimenting.
2466  if (Depth == 6)
2467    return false;  // Limit search depth.
2468
2469  const Operator *I = dyn_cast<Operator>(V);
2470  if (!I) return false;
2471
2472  switch (I->getOpcode()) {
2473  default: break;
2474  // Unsigned integers are always nonnegative.
2475  case Instruction::UIToFP:
2476    return true;
2477  case Instruction::FMul:
2478    // x*x is always non-negative or a NaN.
2479    if (I->getOperand(0) == I->getOperand(1))
2480      return true;
2481    // Fall through
2482  case Instruction::FAdd:
2483  case Instruction::FDiv:
2484  case Instruction::FRem:
2485    return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
2486           CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2487  case Instruction::Select:
2488    return CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1) &&
2489           CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
2490  case Instruction::FPExt:
2491  case Instruction::FPTrunc:
2492    // Widening/narrowing never change sign.
2493    return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
2494  case Instruction::Call:
2495    Intrinsic::ID IID = getIntrinsicForCallSite(cast<CallInst>(I), TLI);
2496    switch (IID) {
2497    default:
2498      break;
2499    case Intrinsic::maxnum:
2500      return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) ||
2501             CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2502    case Intrinsic::minnum:
2503      return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
2504             CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2505    case Intrinsic::exp:
2506    case Intrinsic::exp2:
2507    case Intrinsic::fabs:
2508    case Intrinsic::sqrt:
2509      return true;
2510    case Intrinsic::powi:
2511      if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
2512        // powi(x,n) is non-negative if n is even.
2513        if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
2514          return true;
2515      }
2516      return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
2517    case Intrinsic::fma:
2518    case Intrinsic::fmuladd:
2519      // x*x+y is non-negative if y is non-negative.
2520      return I->getOperand(0) == I->getOperand(1) &&
2521             CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
2522    }
2523    break;
2524  }
2525  return false;
2526}
2527
2528/// If the specified value can be set by repeating the same byte in memory,
2529/// return the i8 value that it is represented with.  This is
2530/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2531/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
2532/// byte store (e.g. i16 0x1234), return null.
2533Value *llvm::isBytewiseValue(Value *V) {
2534  // All byte-wide stores are splatable, even of arbitrary variables.
2535  if (V->getType()->isIntegerTy(8)) return V;
2536
2537  // Handle 'null' ConstantArrayZero etc.
2538  if (Constant *C = dyn_cast<Constant>(V))
2539    if (C->isNullValue())
2540      return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2541
2542  // Constant float and double values can be handled as integer values if the
2543  // corresponding integer value is "byteable".  An important case is 0.0.
2544  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2545    if (CFP->getType()->isFloatTy())
2546      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2547    if (CFP->getType()->isDoubleTy())
2548      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2549    // Don't handle long double formats, which have strange constraints.
2550  }
2551
2552  // We can handle constant integers that are multiple of 8 bits.
2553  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2554    if (CI->getBitWidth() % 8 == 0) {
2555      assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2556
2557      if (!CI->getValue().isSplat(8))
2558        return nullptr;
2559      return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2560    }
2561  }
2562
2563  // A ConstantDataArray/Vector is splatable if all its members are equal and
2564  // also splatable.
2565  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2566    Value *Elt = CA->getElementAsConstant(0);
2567    Value *Val = isBytewiseValue(Elt);
2568    if (!Val)
2569      return nullptr;
2570
2571    for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2572      if (CA->getElementAsConstant(I) != Elt)
2573        return nullptr;
2574
2575    return Val;
2576  }
2577
2578  // Conceptually, we could handle things like:
2579  //   %a = zext i8 %X to i16
2580  //   %b = shl i16 %a, 8
2581  //   %c = or i16 %a, %b
2582  // but until there is an example that actually needs this, it doesn't seem
2583  // worth worrying about.
2584  return nullptr;
2585}
2586
2587
2588// This is the recursive version of BuildSubAggregate. It takes a few different
2589// arguments. Idxs is the index within the nested struct From that we are
2590// looking at now (which is of type IndexedType). IdxSkip is the number of
2591// indices from Idxs that should be left out when inserting into the resulting
2592// struct. To is the result struct built so far, new insertvalue instructions
2593// build on that.
2594static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2595                                SmallVectorImpl<unsigned> &Idxs,
2596                                unsigned IdxSkip,
2597                                Instruction *InsertBefore) {
2598  llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
2599  if (STy) {
2600    // Save the original To argument so we can modify it
2601    Value *OrigTo = To;
2602    // General case, the type indexed by Idxs is a struct
2603    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2604      // Process each struct element recursively
2605      Idxs.push_back(i);
2606      Value *PrevTo = To;
2607      To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2608                             InsertBefore);
2609      Idxs.pop_back();
2610      if (!To) {
2611        // Couldn't find any inserted value for this index? Cleanup
2612        while (PrevTo != OrigTo) {
2613          InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2614          PrevTo = Del->getAggregateOperand();
2615          Del->eraseFromParent();
2616        }
2617        // Stop processing elements
2618        break;
2619      }
2620    }
2621    // If we successfully found a value for each of our subaggregates
2622    if (To)
2623      return To;
2624  }
2625  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
2626  // the struct's elements had a value that was inserted directly. In the latter
2627  // case, perhaps we can't determine each of the subelements individually, but
2628  // we might be able to find the complete struct somewhere.
2629
2630  // Find the value that is at that particular spot
2631  Value *V = FindInsertedValue(From, Idxs);
2632
2633  if (!V)
2634    return nullptr;
2635
2636  // Insert the value in the new (sub) aggregrate
2637  return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2638                                       "tmp", InsertBefore);
2639}
2640
2641// This helper takes a nested struct and extracts a part of it (which is again a
2642// struct) into a new value. For example, given the struct:
2643// { a, { b, { c, d }, e } }
2644// and the indices "1, 1" this returns
2645// { c, d }.
2646//
2647// It does this by inserting an insertvalue for each element in the resulting
2648// struct, as opposed to just inserting a single struct. This will only work if
2649// each of the elements of the substruct are known (ie, inserted into From by an
2650// insertvalue instruction somewhere).
2651//
2652// All inserted insertvalue instructions are inserted before InsertBefore
2653static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2654                                Instruction *InsertBefore) {
2655  assert(InsertBefore && "Must have someplace to insert!");
2656  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2657                                                             idx_range);
2658  Value *To = UndefValue::get(IndexedType);
2659  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2660  unsigned IdxSkip = Idxs.size();
2661
2662  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2663}
2664
2665/// Given an aggregrate and an sequence of indices, see if
2666/// the scalar value indexed is already around as a register, for example if it
2667/// were inserted directly into the aggregrate.
2668///
2669/// If InsertBefore is not null, this function will duplicate (modified)
2670/// insertvalues when a part of a nested struct is extracted.
2671Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2672                               Instruction *InsertBefore) {
2673  // Nothing to index? Just return V then (this is useful at the end of our
2674  // recursion).
2675  if (idx_range.empty())
2676    return V;
2677  // We have indices, so V should have an indexable type.
2678  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
2679         "Not looking at a struct or array?");
2680  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
2681         "Invalid indices for type?");
2682
2683  if (Constant *C = dyn_cast<Constant>(V)) {
2684    C = C->getAggregateElement(idx_range[0]);
2685    if (!C) return nullptr;
2686    return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
2687  }
2688
2689  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
2690    // Loop the indices for the insertvalue instruction in parallel with the
2691    // requested indices
2692    const unsigned *req_idx = idx_range.begin();
2693    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
2694         i != e; ++i, ++req_idx) {
2695      if (req_idx == idx_range.end()) {
2696        // We can't handle this without inserting insertvalues
2697        if (!InsertBefore)
2698          return nullptr;
2699
2700        // The requested index identifies a part of a nested aggregate. Handle
2701        // this specially. For example,
2702        // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
2703        // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
2704        // %C = extractvalue {i32, { i32, i32 } } %B, 1
2705        // This can be changed into
2706        // %A = insertvalue {i32, i32 } undef, i32 10, 0
2707        // %C = insertvalue {i32, i32 } %A, i32 11, 1
2708        // which allows the unused 0,0 element from the nested struct to be
2709        // removed.
2710        return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
2711                                 InsertBefore);
2712      }
2713
2714      // This insert value inserts something else than what we are looking for.
2715      // See if the (aggregate) value inserted into has the value we are
2716      // looking for, then.
2717      if (*req_idx != *i)
2718        return FindInsertedValue(I->getAggregateOperand(), idx_range,
2719                                 InsertBefore);
2720    }
2721    // If we end up here, the indices of the insertvalue match with those
2722    // requested (though possibly only partially). Now we recursively look at
2723    // the inserted value, passing any remaining indices.
2724    return FindInsertedValue(I->getInsertedValueOperand(),
2725                             makeArrayRef(req_idx, idx_range.end()),
2726                             InsertBefore);
2727  }
2728
2729  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
2730    // If we're extracting a value from an aggregate that was extracted from
2731    // something else, we can extract from that something else directly instead.
2732    // However, we will need to chain I's indices with the requested indices.
2733
2734    // Calculate the number of indices required
2735    unsigned size = I->getNumIndices() + idx_range.size();
2736    // Allocate some space to put the new indices in
2737    SmallVector<unsigned, 5> Idxs;
2738    Idxs.reserve(size);
2739    // Add indices from the extract value instruction
2740    Idxs.append(I->idx_begin(), I->idx_end());
2741
2742    // Add requested indices
2743    Idxs.append(idx_range.begin(), idx_range.end());
2744
2745    assert(Idxs.size() == size
2746           && "Number of indices added not correct?");
2747
2748    return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
2749  }
2750  // Otherwise, we don't know (such as, extracting from a function return value
2751  // or load instruction)
2752  return nullptr;
2753}
2754
2755/// Analyze the specified pointer to see if it can be expressed as a base
2756/// pointer plus a constant offset. Return the base and offset to the caller.
2757Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
2758                                              const DataLayout &DL) {
2759  unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
2760  APInt ByteOffset(BitWidth, 0);
2761
2762  // We walk up the defs but use a visited set to handle unreachable code. In
2763  // that case, we stop after accumulating the cycle once (not that it
2764  // matters).
2765  SmallPtrSet<Value *, 16> Visited;
2766  while (Visited.insert(Ptr).second) {
2767    if (Ptr->getType()->isVectorTy())
2768      break;
2769
2770    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
2771      APInt GEPOffset(BitWidth, 0);
2772      if (!GEP->accumulateConstantOffset(DL, GEPOffset))
2773        break;
2774
2775      ByteOffset += GEPOffset;
2776
2777      Ptr = GEP->getPointerOperand();
2778    } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
2779               Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
2780      Ptr = cast<Operator>(Ptr)->getOperand(0);
2781    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
2782      if (GA->isInterposable())
2783        break;
2784      Ptr = GA->getAliasee();
2785    } else {
2786      break;
2787    }
2788  }
2789  Offset = ByteOffset.getSExtValue();
2790  return Ptr;
2791}
2792
2793bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP) {
2794  // Make sure the GEP has exactly three arguments.
2795  if (GEP->getNumOperands() != 3)
2796    return false;
2797
2798  // Make sure the index-ee is a pointer to array of i8.
2799  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
2800  if (!AT || !AT->getElementType()->isIntegerTy(8))
2801    return false;
2802
2803  // Check to make sure that the first operand of the GEP is an integer and
2804  // has value 0 so that we are sure we're indexing into the initializer.
2805  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
2806  if (!FirstIdx || !FirstIdx->isZero())
2807    return false;
2808
2809  return true;
2810}
2811
2812/// This function computes the length of a null-terminated C string pointed to
2813/// by V. If successful, it returns true and returns the string in Str.
2814/// If unsuccessful, it returns false.
2815bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
2816                                 uint64_t Offset, bool TrimAtNul) {
2817  assert(V);
2818
2819  // Look through bitcast instructions and geps.
2820  V = V->stripPointerCasts();
2821
2822  // If the value is a GEP instruction or constant expression, treat it as an
2823  // offset.
2824  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2825    // The GEP operator should be based on a pointer to string constant, and is
2826    // indexing into the string constant.
2827    if (!isGEPBasedOnPointerToString(GEP))
2828      return false;
2829
2830    // If the second index isn't a ConstantInt, then this is a variable index
2831    // into the array.  If this occurs, we can't say anything meaningful about
2832    // the string.
2833    uint64_t StartIdx = 0;
2834    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
2835      StartIdx = CI->getZExtValue();
2836    else
2837      return false;
2838    return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
2839                                 TrimAtNul);
2840  }
2841
2842  // The GEP instruction, constant or instruction, must reference a global
2843  // variable that is a constant and is initialized. The referenced constant
2844  // initializer is the array that we'll use for optimization.
2845  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2846  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
2847    return false;
2848
2849  // Handle the all-zeros case.
2850  if (GV->getInitializer()->isNullValue()) {
2851    // This is a degenerate case. The initializer is constant zero so the
2852    // length of the string must be zero.
2853    Str = "";
2854    return true;
2855  }
2856
2857  // This must be a ConstantDataArray.
2858  const auto *Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
2859  if (!Array || !Array->isString())
2860    return false;
2861
2862  // Get the number of elements in the array.
2863  uint64_t NumElts = Array->getType()->getArrayNumElements();
2864
2865  // Start out with the entire array in the StringRef.
2866  Str = Array->getAsString();
2867
2868  if (Offset > NumElts)
2869    return false;
2870
2871  // Skip over 'offset' bytes.
2872  Str = Str.substr(Offset);
2873
2874  if (TrimAtNul) {
2875    // Trim off the \0 and anything after it.  If the array is not nul
2876    // terminated, we just return the whole end of string.  The client may know
2877    // some other way that the string is length-bound.
2878    Str = Str.substr(0, Str.find('\0'));
2879  }
2880  return true;
2881}
2882
2883// These next two are very similar to the above, but also look through PHI
2884// nodes.
2885// TODO: See if we can integrate these two together.
2886
2887/// If we can compute the length of the string pointed to by
2888/// the specified pointer, return 'len+1'.  If we can't, return 0.
2889static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) {
2890  // Look through noop bitcast instructions.
2891  V = V->stripPointerCasts();
2892
2893  // If this is a PHI node, there are two cases: either we have already seen it
2894  // or we haven't.
2895  if (PHINode *PN = dyn_cast<PHINode>(V)) {
2896    if (!PHIs.insert(PN).second)
2897      return ~0ULL;  // already in the set.
2898
2899    // If it was new, see if all the input strings are the same length.
2900    uint64_t LenSoFar = ~0ULL;
2901    for (Value *IncValue : PN->incoming_values()) {
2902      uint64_t Len = GetStringLengthH(IncValue, PHIs);
2903      if (Len == 0) return 0; // Unknown length -> unknown.
2904
2905      if (Len == ~0ULL) continue;
2906
2907      if (Len != LenSoFar && LenSoFar != ~0ULL)
2908        return 0;    // Disagree -> unknown.
2909      LenSoFar = Len;
2910    }
2911
2912    // Success, all agree.
2913    return LenSoFar;
2914  }
2915
2916  // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
2917  if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
2918    uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
2919    if (Len1 == 0) return 0;
2920    uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
2921    if (Len2 == 0) return 0;
2922    if (Len1 == ~0ULL) return Len2;
2923    if (Len2 == ~0ULL) return Len1;
2924    if (Len1 != Len2) return 0;
2925    return Len1;
2926  }
2927
2928  // Otherwise, see if we can read the string.
2929  StringRef StrData;
2930  if (!getConstantStringInfo(V, StrData))
2931    return 0;
2932
2933  return StrData.size()+1;
2934}
2935
2936/// If we can compute the length of the string pointed to by
2937/// the specified pointer, return 'len+1'.  If we can't, return 0.
2938uint64_t llvm::GetStringLength(Value *V) {
2939  if (!V->getType()->isPointerTy()) return 0;
2940
2941  SmallPtrSet<PHINode*, 32> PHIs;
2942  uint64_t Len = GetStringLengthH(V, PHIs);
2943  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
2944  // an empty string as a length.
2945  return Len == ~0ULL ? 1 : Len;
2946}
2947
2948/// \brief \p PN defines a loop-variant pointer to an object.  Check if the
2949/// previous iteration of the loop was referring to the same object as \p PN.
2950static bool isSameUnderlyingObjectInLoop(PHINode *PN, LoopInfo *LI) {
2951  // Find the loop-defined value.
2952  Loop *L = LI->getLoopFor(PN->getParent());
2953  if (PN->getNumIncomingValues() != 2)
2954    return true;
2955
2956  // Find the value from previous iteration.
2957  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
2958  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
2959    PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
2960  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
2961    return true;
2962
2963  // If a new pointer is loaded in the loop, the pointer references a different
2964  // object in every iteration.  E.g.:
2965  //    for (i)
2966  //       int *p = a[i];
2967  //       ...
2968  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
2969    if (!L->isLoopInvariant(Load->getPointerOperand()))
2970      return false;
2971  return true;
2972}
2973
2974Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
2975                                 unsigned MaxLookup) {
2976  if (!V->getType()->isPointerTy())
2977    return V;
2978  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
2979    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2980      V = GEP->getPointerOperand();
2981    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
2982               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
2983      V = cast<Operator>(V)->getOperand(0);
2984    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2985      if (GA->isInterposable())
2986        return V;
2987      V = GA->getAliasee();
2988    } else {
2989      if (auto CS = CallSite(V))
2990        if (Value *RV = CS.getReturnedArgOperand()) {
2991          V = RV;
2992          continue;
2993        }
2994
2995      // See if InstructionSimplify knows any relevant tricks.
2996      if (Instruction *I = dyn_cast<Instruction>(V))
2997        // TODO: Acquire a DominatorTree and AssumptionCache and use them.
2998        if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
2999          V = Simplified;
3000          continue;
3001        }
3002
3003      return V;
3004    }
3005    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3006  }
3007  return V;
3008}
3009
3010void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3011                                const DataLayout &DL, LoopInfo *LI,
3012                                unsigned MaxLookup) {
3013  SmallPtrSet<Value *, 4> Visited;
3014  SmallVector<Value *, 4> Worklist;
3015  Worklist.push_back(V);
3016  do {
3017    Value *P = Worklist.pop_back_val();
3018    P = GetUnderlyingObject(P, DL, MaxLookup);
3019
3020    if (!Visited.insert(P).second)
3021      continue;
3022
3023    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3024      Worklist.push_back(SI->getTrueValue());
3025      Worklist.push_back(SI->getFalseValue());
3026      continue;
3027    }
3028
3029    if (PHINode *PN = dyn_cast<PHINode>(P)) {
3030      // If this PHI changes the underlying object in every iteration of the
3031      // loop, don't look through it.  Consider:
3032      //   int **A;
3033      //   for (i) {
3034      //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3035      //     Curr = A[i];
3036      //     *Prev, *Curr;
3037      //
3038      // Prev is tracking Curr one iteration behind so they refer to different
3039      // underlying objects.
3040      if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3041          isSameUnderlyingObjectInLoop(PN, LI))
3042        for (Value *IncValue : PN->incoming_values())
3043          Worklist.push_back(IncValue);
3044      continue;
3045    }
3046
3047    Objects.push_back(P);
3048  } while (!Worklist.empty());
3049}
3050
3051/// Return true if the only users of this pointer are lifetime markers.
3052bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3053  for (const User *U : V->users()) {
3054    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3055    if (!II) return false;
3056
3057    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3058        II->getIntrinsicID() != Intrinsic::lifetime_end)
3059      return false;
3060  }
3061  return true;
3062}
3063
3064bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3065                                        const Instruction *CtxI,
3066                                        const DominatorTree *DT) {
3067  const Operator *Inst = dyn_cast<Operator>(V);
3068  if (!Inst)
3069    return false;
3070
3071  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3072    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3073      if (C->canTrap())
3074        return false;
3075
3076  switch (Inst->getOpcode()) {
3077  default:
3078    return true;
3079  case Instruction::UDiv:
3080  case Instruction::URem: {
3081    // x / y is undefined if y == 0.
3082    const APInt *V;
3083    if (match(Inst->getOperand(1), m_APInt(V)))
3084      return *V != 0;
3085    return false;
3086  }
3087  case Instruction::SDiv:
3088  case Instruction::SRem: {
3089    // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3090    const APInt *Numerator, *Denominator;
3091    if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3092      return false;
3093    // We cannot hoist this division if the denominator is 0.
3094    if (*Denominator == 0)
3095      return false;
3096    // It's safe to hoist if the denominator is not 0 or -1.
3097    if (*Denominator != -1)
3098      return true;
3099    // At this point we know that the denominator is -1.  It is safe to hoist as
3100    // long we know that the numerator is not INT_MIN.
3101    if (match(Inst->getOperand(0), m_APInt(Numerator)))
3102      return !Numerator->isMinSignedValue();
3103    // The numerator *might* be MinSignedValue.
3104    return false;
3105  }
3106  case Instruction::Load: {
3107    const LoadInst *LI = cast<LoadInst>(Inst);
3108    if (!LI->isUnordered() ||
3109        // Speculative load may create a race that did not exist in the source.
3110        LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3111        // Speculative load may load data from dirty regions.
3112        LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress))
3113      return false;
3114    const DataLayout &DL = LI->getModule()->getDataLayout();
3115    return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3116                                              LI->getAlignment(), DL, CtxI, DT);
3117  }
3118  case Instruction::Call: {
3119    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
3120      switch (II->getIntrinsicID()) {
3121      // These synthetic intrinsics have no side-effects and just mark
3122      // information about their operands.
3123      // FIXME: There are other no-op synthetic instructions that potentially
3124      // should be considered at least *safe* to speculate...
3125      case Intrinsic::dbg_declare:
3126      case Intrinsic::dbg_value:
3127        return true;
3128
3129      case Intrinsic::bswap:
3130      case Intrinsic::ctlz:
3131      case Intrinsic::ctpop:
3132      case Intrinsic::cttz:
3133      case Intrinsic::objectsize:
3134      case Intrinsic::sadd_with_overflow:
3135      case Intrinsic::smul_with_overflow:
3136      case Intrinsic::ssub_with_overflow:
3137      case Intrinsic::uadd_with_overflow:
3138      case Intrinsic::umul_with_overflow:
3139      case Intrinsic::usub_with_overflow:
3140        return true;
3141      // These intrinsics are defined to have the same behavior as libm
3142      // functions except for setting errno.
3143      case Intrinsic::sqrt:
3144      case Intrinsic::fma:
3145      case Intrinsic::fmuladd:
3146        return true;
3147      // These intrinsics are defined to have the same behavior as libm
3148      // functions, and the corresponding libm functions never set errno.
3149      case Intrinsic::trunc:
3150      case Intrinsic::copysign:
3151      case Intrinsic::fabs:
3152      case Intrinsic::minnum:
3153      case Intrinsic::maxnum:
3154        return true;
3155      // These intrinsics are defined to have the same behavior as libm
3156      // functions, which never overflow when operating on the IEEE754 types
3157      // that we support, and never set errno otherwise.
3158      case Intrinsic::ceil:
3159      case Intrinsic::floor:
3160      case Intrinsic::nearbyint:
3161      case Intrinsic::rint:
3162      case Intrinsic::round:
3163        return true;
3164      // TODO: are convert_{from,to}_fp16 safe?
3165      // TODO: can we list target-specific intrinsics here?
3166      default: break;
3167      }
3168    }
3169    return false; // The called function could have undefined behavior or
3170                  // side-effects, even if marked readnone nounwind.
3171  }
3172  case Instruction::VAArg:
3173  case Instruction::Alloca:
3174  case Instruction::Invoke:
3175  case Instruction::PHI:
3176  case Instruction::Store:
3177  case Instruction::Ret:
3178  case Instruction::Br:
3179  case Instruction::IndirectBr:
3180  case Instruction::Switch:
3181  case Instruction::Unreachable:
3182  case Instruction::Fence:
3183  case Instruction::AtomicRMW:
3184  case Instruction::AtomicCmpXchg:
3185  case Instruction::LandingPad:
3186  case Instruction::Resume:
3187  case Instruction::CatchSwitch:
3188  case Instruction::CatchPad:
3189  case Instruction::CatchRet:
3190  case Instruction::CleanupPad:
3191  case Instruction::CleanupRet:
3192    return false; // Misc instructions which have effects
3193  }
3194}
3195
3196bool llvm::mayBeMemoryDependent(const Instruction &I) {
3197  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3198}
3199
3200/// Return true if we know that the specified value is never null.
3201bool llvm::isKnownNonNull(const Value *V) {
3202  assert(V->getType()->isPointerTy() && "V must be pointer type");
3203
3204  // Alloca never returns null, malloc might.
3205  if (isa<AllocaInst>(V)) return true;
3206
3207  // A byval, inalloca, or nonnull argument is never null.
3208  if (const Argument *A = dyn_cast<Argument>(V))
3209    return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
3210
3211  // A global variable in address space 0 is non null unless extern weak.
3212  // Other address spaces may have null as a valid address for a global,
3213  // so we can't assume anything.
3214  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
3215    return !GV->hasExternalWeakLinkage() &&
3216           GV->getType()->getAddressSpace() == 0;
3217
3218  // A Load tagged with nonnull metadata is never null.
3219  if (const LoadInst *LI = dyn_cast<LoadInst>(V))
3220    return LI->getMetadata(LLVMContext::MD_nonnull);
3221
3222  if (auto CS = ImmutableCallSite(V))
3223    if (CS.isReturnNonNull())
3224      return true;
3225
3226  return false;
3227}
3228
3229static bool isKnownNonNullFromDominatingCondition(const Value *V,
3230                                                  const Instruction *CtxI,
3231                                                  const DominatorTree *DT) {
3232  assert(V->getType()->isPointerTy() && "V must be pointer type");
3233
3234  unsigned NumUsesExplored = 0;
3235  for (auto *U : V->users()) {
3236    // Avoid massive lists
3237    if (NumUsesExplored >= DomConditionsMaxUses)
3238      break;
3239    NumUsesExplored++;
3240    // Consider only compare instructions uniquely controlling a branch
3241    CmpInst::Predicate Pred;
3242    if (!match(const_cast<User *>(U),
3243               m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
3244        (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
3245      continue;
3246
3247    for (auto *CmpU : U->users()) {
3248      if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
3249        assert(BI->isConditional() && "uses a comparison!");
3250
3251        BasicBlock *NonNullSuccessor =
3252            BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
3253        BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
3254        if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
3255          return true;
3256      } else if (Pred == ICmpInst::ICMP_NE &&
3257                 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
3258                 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
3259        return true;
3260      }
3261    }
3262  }
3263
3264  return false;
3265}
3266
3267bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
3268                            const DominatorTree *DT) {
3269  if (isKnownNonNull(V))
3270    return true;
3271
3272  return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false;
3273}
3274
3275OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
3276                                                   const DataLayout &DL,
3277                                                   AssumptionCache *AC,
3278                                                   const Instruction *CxtI,
3279                                                   const DominatorTree *DT) {
3280  // Multiplying n * m significant bits yields a result of n + m significant
3281  // bits. If the total number of significant bits does not exceed the
3282  // result bit width (minus 1), there is no overflow.
3283  // This means if we have enough leading zero bits in the operands
3284  // we can guarantee that the result does not overflow.
3285  // Ref: "Hacker's Delight" by Henry Warren
3286  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3287  APInt LHSKnownZero(BitWidth, 0);
3288  APInt LHSKnownOne(BitWidth, 0);
3289  APInt RHSKnownZero(BitWidth, 0);
3290  APInt RHSKnownOne(BitWidth, 0);
3291  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3292                   DT);
3293  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3294                   DT);
3295  // Note that underestimating the number of zero bits gives a more
3296  // conservative answer.
3297  unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
3298                      RHSKnownZero.countLeadingOnes();
3299  // First handle the easy case: if we have enough zero bits there's
3300  // definitely no overflow.
3301  if (ZeroBits >= BitWidth)
3302    return OverflowResult::NeverOverflows;
3303
3304  // Get the largest possible values for each operand.
3305  APInt LHSMax = ~LHSKnownZero;
3306  APInt RHSMax = ~RHSKnownZero;
3307
3308  // We know the multiply operation doesn't overflow if the maximum values for
3309  // each operand will not overflow after we multiply them together.
3310  bool MaxOverflow;
3311  LHSMax.umul_ov(RHSMax, MaxOverflow);
3312  if (!MaxOverflow)
3313    return OverflowResult::NeverOverflows;
3314
3315  // We know it always overflows if multiplying the smallest possible values for
3316  // the operands also results in overflow.
3317  bool MinOverflow;
3318  LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
3319  if (MinOverflow)
3320    return OverflowResult::AlwaysOverflows;
3321
3322  return OverflowResult::MayOverflow;
3323}
3324
3325OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
3326                                                   const DataLayout &DL,
3327                                                   AssumptionCache *AC,
3328                                                   const Instruction *CxtI,
3329                                                   const DominatorTree *DT) {
3330  bool LHSKnownNonNegative, LHSKnownNegative;
3331  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3332                 AC, CxtI, DT);
3333  if (LHSKnownNonNegative || LHSKnownNegative) {
3334    bool RHSKnownNonNegative, RHSKnownNegative;
3335    ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3336                   AC, CxtI, DT);
3337
3338    if (LHSKnownNegative && RHSKnownNegative) {
3339      // The sign bit is set in both cases: this MUST overflow.
3340      // Create a simple add instruction, and insert it into the struct.
3341      return OverflowResult::AlwaysOverflows;
3342    }
3343
3344    if (LHSKnownNonNegative && RHSKnownNonNegative) {
3345      // The sign bit is clear in both cases: this CANNOT overflow.
3346      // Create a simple add instruction, and insert it into the struct.
3347      return OverflowResult::NeverOverflows;
3348    }
3349  }
3350
3351  return OverflowResult::MayOverflow;
3352}
3353
3354static OverflowResult computeOverflowForSignedAdd(
3355    Value *LHS, Value *RHS, AddOperator *Add, const DataLayout &DL,
3356    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) {
3357  if (Add && Add->hasNoSignedWrap()) {
3358    return OverflowResult::NeverOverflows;
3359  }
3360
3361  bool LHSKnownNonNegative, LHSKnownNegative;
3362  bool RHSKnownNonNegative, RHSKnownNegative;
3363  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3364                 AC, CxtI, DT);
3365  ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3366                 AC, CxtI, DT);
3367
3368  if ((LHSKnownNonNegative && RHSKnownNegative) ||
3369      (LHSKnownNegative && RHSKnownNonNegative)) {
3370    // The sign bits are opposite: this CANNOT overflow.
3371    return OverflowResult::NeverOverflows;
3372  }
3373
3374  // The remaining code needs Add to be available. Early returns if not so.
3375  if (!Add)
3376    return OverflowResult::MayOverflow;
3377
3378  // If the sign of Add is the same as at least one of the operands, this add
3379  // CANNOT overflow. This is particularly useful when the sum is
3380  // @llvm.assume'ed non-negative rather than proved so from analyzing its
3381  // operands.
3382  bool LHSOrRHSKnownNonNegative =
3383      (LHSKnownNonNegative || RHSKnownNonNegative);
3384  bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
3385  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3386    bool AddKnownNonNegative, AddKnownNegative;
3387    ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
3388                   /*Depth=*/0, AC, CxtI, DT);
3389    if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
3390        (AddKnownNegative && LHSOrRHSKnownNegative)) {
3391      return OverflowResult::NeverOverflows;
3392    }
3393  }
3394
3395  return OverflowResult::MayOverflow;
3396}
3397
3398bool llvm::isOverflowIntrinsicNoWrap(IntrinsicInst *II, DominatorTree &DT) {
3399#ifndef NDEBUG
3400  auto IID = II->getIntrinsicID();
3401  assert((IID == Intrinsic::sadd_with_overflow ||
3402          IID == Intrinsic::uadd_with_overflow ||
3403          IID == Intrinsic::ssub_with_overflow ||
3404          IID == Intrinsic::usub_with_overflow ||
3405          IID == Intrinsic::smul_with_overflow ||
3406          IID == Intrinsic::umul_with_overflow) &&
3407         "Not an overflow intrinsic!");
3408#endif
3409
3410  SmallVector<BranchInst *, 2> GuardingBranches;
3411  SmallVector<ExtractValueInst *, 2> Results;
3412
3413  for (User *U : II->users()) {
3414    if (auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3415      assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3416
3417      if (EVI->getIndices()[0] == 0)
3418        Results.push_back(EVI);
3419      else {
3420        assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3421
3422        for (auto *U : EVI->users())
3423          if (auto *B = dyn_cast<BranchInst>(U)) {
3424            assert(B->isConditional() && "How else is it using an i1?");
3425            GuardingBranches.push_back(B);
3426          }
3427      }
3428    } else {
3429      // We are using the aggregate directly in a way we don't want to analyze
3430      // here (storing it to a global, say).
3431      return false;
3432    }
3433  }
3434
3435  auto AllUsesGuardedByBranch = [&](BranchInst *BI) {
3436    BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3437    if (!NoWrapEdge.isSingleEdge())
3438      return false;
3439
3440    // Check if all users of the add are provably no-wrap.
3441    for (auto *Result : Results) {
3442      // If the extractvalue itself is not executed on overflow, the we don't
3443      // need to check each use separately, since domination is transitive.
3444      if (DT.dominates(NoWrapEdge, Result->getParent()))
3445        continue;
3446
3447      for (auto &RU : Result->uses())
3448        if (!DT.dominates(NoWrapEdge, RU))
3449          return false;
3450    }
3451
3452    return true;
3453  };
3454
3455  return any_of(GuardingBranches, AllUsesGuardedByBranch);
3456}
3457
3458
3459OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
3460                                                 const DataLayout &DL,
3461                                                 AssumptionCache *AC,
3462                                                 const Instruction *CxtI,
3463                                                 const DominatorTree *DT) {
3464  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3465                                       Add, DL, AC, CxtI, DT);
3466}
3467
3468OverflowResult llvm::computeOverflowForSignedAdd(Value *LHS, Value *RHS,
3469                                                 const DataLayout &DL,
3470                                                 AssumptionCache *AC,
3471                                                 const Instruction *CxtI,
3472                                                 const DominatorTree *DT) {
3473  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3474}
3475
3476bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3477  // A memory operation returns normally if it isn't volatile. A volatile
3478  // operation is allowed to trap.
3479  //
3480  // An atomic operation isn't guaranteed to return in a reasonable amount of
3481  // time because it's possible for another thread to interfere with it for an
3482  // arbitrary length of time, but programs aren't allowed to rely on that.
3483  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
3484    return !LI->isVolatile();
3485  if (const StoreInst *SI = dyn_cast<StoreInst>(I))
3486    return !SI->isVolatile();
3487  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
3488    return !CXI->isVolatile();
3489  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
3490    return !RMWI->isVolatile();
3491  if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
3492    return !MII->isVolatile();
3493
3494  // If there is no successor, then execution can't transfer to it.
3495  if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
3496    return !CRI->unwindsToCaller();
3497  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
3498    return !CatchSwitch->unwindsToCaller();
3499  if (isa<ResumeInst>(I))
3500    return false;
3501  if (isa<ReturnInst>(I))
3502    return false;
3503
3504  // Calls can throw, or contain an infinite loop, or kill the process.
3505  if (CallSite CS = CallSite(const_cast<Instruction*>(I))) {
3506    // Calls which don't write to arbitrary memory are safe.
3507    // FIXME: Ignoring infinite loops without any side-effects is too aggressive,
3508    // but it's consistent with other passes. See http://llvm.org/PR965 .
3509    // FIXME: This isn't aggressive enough; a call which only writes to a
3510    // global is guaranteed to return.
3511    return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
3512           match(I, m_Intrinsic<Intrinsic::assume>());
3513  }
3514
3515  // Other instructions return normally.
3516  return true;
3517}
3518
3519bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3520                                                  const Loop *L) {
3521  // The loop header is guaranteed to be executed for every iteration.
3522  //
3523  // FIXME: Relax this constraint to cover all basic blocks that are
3524  // guaranteed to be executed at every iteration.
3525  if (I->getParent() != L->getHeader()) return false;
3526
3527  for (const Instruction &LI : *L->getHeader()) {
3528    if (&LI == I) return true;
3529    if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3530  }
3531  llvm_unreachable("Instruction not contained in its own parent basic block.");
3532}
3533
3534bool llvm::propagatesFullPoison(const Instruction *I) {
3535  switch (I->getOpcode()) {
3536    case Instruction::Add:
3537    case Instruction::Sub:
3538    case Instruction::Xor:
3539    case Instruction::Trunc:
3540    case Instruction::BitCast:
3541    case Instruction::AddrSpaceCast:
3542      // These operations all propagate poison unconditionally. Note that poison
3543      // is not any particular value, so xor or subtraction of poison with
3544      // itself still yields poison, not zero.
3545      return true;
3546
3547    case Instruction::AShr:
3548    case Instruction::SExt:
3549      // For these operations, one bit of the input is replicated across
3550      // multiple output bits. A replicated poison bit is still poison.
3551      return true;
3552
3553    case Instruction::Shl: {
3554      // Left shift *by* a poison value is poison. The number of
3555      // positions to shift is unsigned, so no negative values are
3556      // possible there. Left shift by zero places preserves poison. So
3557      // it only remains to consider left shift of poison by a positive
3558      // number of places.
3559      //
3560      // A left shift by a positive number of places leaves the lowest order bit
3561      // non-poisoned. However, if such a shift has a no-wrap flag, then we can
3562      // make the poison operand violate that flag, yielding a fresh full-poison
3563      // value.
3564      auto *OBO = cast<OverflowingBinaryOperator>(I);
3565      return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
3566    }
3567
3568    case Instruction::Mul: {
3569      // A multiplication by zero yields a non-poison zero result, so we need to
3570      // rule out zero as an operand. Conservatively, multiplication by a
3571      // non-zero constant is not multiplication by zero.
3572      //
3573      // Multiplication by a non-zero constant can leave some bits
3574      // non-poisoned. For example, a multiplication by 2 leaves the lowest
3575      // order bit unpoisoned. So we need to consider that.
3576      //
3577      // Multiplication by 1 preserves poison. If the multiplication has a
3578      // no-wrap flag, then we can make the poison operand violate that flag
3579      // when multiplied by any integer other than 0 and 1.
3580      auto *OBO = cast<OverflowingBinaryOperator>(I);
3581      if (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) {
3582        for (Value *V : OBO->operands()) {
3583          if (auto *CI = dyn_cast<ConstantInt>(V)) {
3584            // A ConstantInt cannot yield poison, so we can assume that it is
3585            // the other operand that is poison.
3586            return !CI->isZero();
3587          }
3588        }
3589      }
3590      return false;
3591    }
3592
3593    case Instruction::ICmp:
3594      // Comparing poison with any value yields poison.  This is why, for
3595      // instance, x s< (x +nsw 1) can be folded to true.
3596      return true;
3597
3598    case Instruction::GetElementPtr:
3599      // A GEP implicitly represents a sequence of additions, subtractions,
3600      // truncations, sign extensions and multiplications. The multiplications
3601      // are by the non-zero sizes of some set of types, so we do not have to be
3602      // concerned with multiplication by zero. If the GEP is in-bounds, then
3603      // these operations are implicitly no-signed-wrap so poison is propagated
3604      // by the arguments above for Add, Sub, Trunc, SExt and Mul.
3605      return cast<GEPOperator>(I)->isInBounds();
3606
3607    default:
3608      return false;
3609  }
3610}
3611
3612const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3613  switch (I->getOpcode()) {
3614    case Instruction::Store:
3615      return cast<StoreInst>(I)->getPointerOperand();
3616
3617    case Instruction::Load:
3618      return cast<LoadInst>(I)->getPointerOperand();
3619
3620    case Instruction::AtomicCmpXchg:
3621      return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3622
3623    case Instruction::AtomicRMW:
3624      return cast<AtomicRMWInst>(I)->getPointerOperand();
3625
3626    case Instruction::UDiv:
3627    case Instruction::SDiv:
3628    case Instruction::URem:
3629    case Instruction::SRem:
3630      return I->getOperand(1);
3631
3632    default:
3633      return nullptr;
3634  }
3635}
3636
3637bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
3638  // We currently only look for uses of poison values within the same basic
3639  // block, as that makes it easier to guarantee that the uses will be
3640  // executed given that PoisonI is executed.
3641  //
3642  // FIXME: Expand this to consider uses beyond the same basic block. To do
3643  // this, look out for the distinction between post-dominance and strong
3644  // post-dominance.
3645  const BasicBlock *BB = PoisonI->getParent();
3646
3647  // Set of instructions that we have proved will yield poison if PoisonI
3648  // does.
3649  SmallSet<const Value *, 16> YieldsPoison;
3650  SmallSet<const BasicBlock *, 4> Visited;
3651  YieldsPoison.insert(PoisonI);
3652  Visited.insert(PoisonI->getParent());
3653
3654  BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
3655
3656  unsigned Iter = 0;
3657  while (Iter++ < MaxDepth) {
3658    for (auto &I : make_range(Begin, End)) {
3659      if (&I != PoisonI) {
3660        const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
3661        if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
3662          return true;
3663        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
3664          return false;
3665      }
3666
3667      // Mark poison that propagates from I through uses of I.
3668      if (YieldsPoison.count(&I)) {
3669        for (const User *User : I.users()) {
3670          const Instruction *UserI = cast<Instruction>(User);
3671          if (propagatesFullPoison(UserI))
3672            YieldsPoison.insert(User);
3673        }
3674      }
3675    }
3676
3677    if (auto *NextBB = BB->getSingleSuccessor()) {
3678      if (Visited.insert(NextBB).second) {
3679        BB = NextBB;
3680        Begin = BB->getFirstNonPHI()->getIterator();
3681        End = BB->end();
3682        continue;
3683      }
3684    }
3685
3686    break;
3687  };
3688  return false;
3689}
3690
3691static bool isKnownNonNaN(Value *V, FastMathFlags FMF) {
3692  if (FMF.noNaNs())
3693    return true;
3694
3695  if (auto *C = dyn_cast<ConstantFP>(V))
3696    return !C->isNaN();
3697  return false;
3698}
3699
3700static bool isKnownNonZero(Value *V) {
3701  if (auto *C = dyn_cast<ConstantFP>(V))
3702    return !C->isZero();
3703  return false;
3704}
3705
3706static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
3707                                              FastMathFlags FMF,
3708                                              Value *CmpLHS, Value *CmpRHS,
3709                                              Value *TrueVal, Value *FalseVal,
3710                                              Value *&LHS, Value *&RHS) {
3711  LHS = CmpLHS;
3712  RHS = CmpRHS;
3713
3714  // If the predicate is an "or-equal"  (FP) predicate, then signed zeroes may
3715  // return inconsistent results between implementations.
3716  //   (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
3717  //   minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
3718  // Therefore we behave conservatively and only proceed if at least one of the
3719  // operands is known to not be zero, or if we don't care about signed zeroes.
3720  switch (Pred) {
3721  default: break;
3722  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
3723  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
3724    if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
3725        !isKnownNonZero(CmpRHS))
3726      return {SPF_UNKNOWN, SPNB_NA, false};
3727  }
3728
3729  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
3730  bool Ordered = false;
3731
3732  // When given one NaN and one non-NaN input:
3733  //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
3734  //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
3735  //     ordered comparison fails), which could be NaN or non-NaN.
3736  // so here we discover exactly what NaN behavior is required/accepted.
3737  if (CmpInst::isFPPredicate(Pred)) {
3738    bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
3739    bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
3740
3741    if (LHSSafe && RHSSafe) {
3742      // Both operands are known non-NaN.
3743      NaNBehavior = SPNB_RETURNS_ANY;
3744    } else if (CmpInst::isOrdered(Pred)) {
3745      // An ordered comparison will return false when given a NaN, so it
3746      // returns the RHS.
3747      Ordered = true;
3748      if (LHSSafe)
3749        // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
3750        NaNBehavior = SPNB_RETURNS_NAN;
3751      else if (RHSSafe)
3752        NaNBehavior = SPNB_RETURNS_OTHER;
3753      else
3754        // Completely unsafe.
3755        return {SPF_UNKNOWN, SPNB_NA, false};
3756    } else {
3757      Ordered = false;
3758      // An unordered comparison will return true when given a NaN, so it
3759      // returns the LHS.
3760      if (LHSSafe)
3761        // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
3762        NaNBehavior = SPNB_RETURNS_OTHER;
3763      else if (RHSSafe)
3764        NaNBehavior = SPNB_RETURNS_NAN;
3765      else
3766        // Completely unsafe.
3767        return {SPF_UNKNOWN, SPNB_NA, false};
3768    }
3769  }
3770
3771  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
3772    std::swap(CmpLHS, CmpRHS);
3773    Pred = CmpInst::getSwappedPredicate(Pred);
3774    if (NaNBehavior == SPNB_RETURNS_NAN)
3775      NaNBehavior = SPNB_RETURNS_OTHER;
3776    else if (NaNBehavior == SPNB_RETURNS_OTHER)
3777      NaNBehavior = SPNB_RETURNS_NAN;
3778    Ordered = !Ordered;
3779  }
3780
3781  // ([if]cmp X, Y) ? X : Y
3782  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
3783    switch (Pred) {
3784    default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
3785    case ICmpInst::ICMP_UGT:
3786    case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
3787    case ICmpInst::ICMP_SGT:
3788    case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
3789    case ICmpInst::ICMP_ULT:
3790    case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
3791    case ICmpInst::ICMP_SLT:
3792    case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
3793    case FCmpInst::FCMP_UGT:
3794    case FCmpInst::FCMP_UGE:
3795    case FCmpInst::FCMP_OGT:
3796    case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
3797    case FCmpInst::FCMP_ULT:
3798    case FCmpInst::FCMP_ULE:
3799    case FCmpInst::FCMP_OLT:
3800    case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
3801    }
3802  }
3803
3804  if (ConstantInt *C1 = dyn_cast<ConstantInt>(CmpRHS)) {
3805    if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
3806        (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
3807
3808      // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
3809      // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
3810      if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) {
3811        return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
3812      }
3813
3814      // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
3815      // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
3816      if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) {
3817        return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
3818      }
3819    }
3820
3821    // Y >s C ? ~Y : ~C == ~Y <s ~C ? ~Y : ~C = SMIN(~Y, ~C)
3822    if (const auto *C2 = dyn_cast<ConstantInt>(FalseVal)) {
3823      if (Pred == ICmpInst::ICMP_SGT && C1->getType() == C2->getType() &&
3824          ~C1->getValue() == C2->getValue() &&
3825          (match(TrueVal, m_Not(m_Specific(CmpLHS))) ||
3826           match(CmpLHS, m_Not(m_Specific(TrueVal))))) {
3827        LHS = TrueVal;
3828        RHS = FalseVal;
3829        return {SPF_SMIN, SPNB_NA, false};
3830      }
3831    }
3832  }
3833
3834  // TODO: (X > 4) ? X : 5   -->  (X >= 5) ? X : 5  -->  MAX(X, 5)
3835
3836  return {SPF_UNKNOWN, SPNB_NA, false};
3837}
3838
3839static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
3840                              Instruction::CastOps *CastOp) {
3841  CastInst *CI = dyn_cast<CastInst>(V1);
3842  Constant *C = dyn_cast<Constant>(V2);
3843  if (!CI)
3844    return nullptr;
3845  *CastOp = CI->getOpcode();
3846
3847  if (auto *CI2 = dyn_cast<CastInst>(V2)) {
3848    // If V1 and V2 are both the same cast from the same type, we can look
3849    // through V1.
3850    if (CI2->getOpcode() == CI->getOpcode() &&
3851        CI2->getSrcTy() == CI->getSrcTy())
3852      return CI2->getOperand(0);
3853    return nullptr;
3854  } else if (!C) {
3855    return nullptr;
3856  }
3857
3858  Constant *CastedTo = nullptr;
3859
3860  if (isa<ZExtInst>(CI) && CmpI->isUnsigned())
3861    CastedTo = ConstantExpr::getTrunc(C, CI->getSrcTy());
3862
3863  if (isa<SExtInst>(CI) && CmpI->isSigned())
3864    CastedTo = ConstantExpr::getTrunc(C, CI->getSrcTy(), true);
3865
3866  if (isa<TruncInst>(CI))
3867    CastedTo = ConstantExpr::getIntegerCast(C, CI->getSrcTy(), CmpI->isSigned());
3868
3869  if (isa<FPTruncInst>(CI))
3870    CastedTo = ConstantExpr::getFPExtend(C, CI->getSrcTy(), true);
3871
3872  if (isa<FPExtInst>(CI))
3873    CastedTo = ConstantExpr::getFPTrunc(C, CI->getSrcTy(), true);
3874
3875  if (isa<FPToUIInst>(CI))
3876    CastedTo = ConstantExpr::getUIToFP(C, CI->getSrcTy(), true);
3877
3878  if (isa<FPToSIInst>(CI))
3879    CastedTo = ConstantExpr::getSIToFP(C, CI->getSrcTy(), true);
3880
3881  if (isa<UIToFPInst>(CI))
3882    CastedTo = ConstantExpr::getFPToUI(C, CI->getSrcTy(), true);
3883
3884  if (isa<SIToFPInst>(CI))
3885    CastedTo = ConstantExpr::getFPToSI(C, CI->getSrcTy(), true);
3886
3887  if (!CastedTo)
3888    return nullptr;
3889
3890  Constant *CastedBack =
3891      ConstantExpr::getCast(CI->getOpcode(), CastedTo, C->getType(), true);
3892  // Make sure the cast doesn't lose any information.
3893  if (CastedBack != C)
3894    return nullptr;
3895
3896  return CastedTo;
3897}
3898
3899SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
3900                                             Instruction::CastOps *CastOp) {
3901  SelectInst *SI = dyn_cast<SelectInst>(V);
3902  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
3903
3904  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
3905  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
3906
3907  CmpInst::Predicate Pred = CmpI->getPredicate();
3908  Value *CmpLHS = CmpI->getOperand(0);
3909  Value *CmpRHS = CmpI->getOperand(1);
3910  Value *TrueVal = SI->getTrueValue();
3911  Value *FalseVal = SI->getFalseValue();
3912  FastMathFlags FMF;
3913  if (isa<FPMathOperator>(CmpI))
3914    FMF = CmpI->getFastMathFlags();
3915
3916  // Bail out early.
3917  if (CmpI->isEquality())
3918    return {SPF_UNKNOWN, SPNB_NA, false};
3919
3920  // Deal with type mismatches.
3921  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
3922    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
3923      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
3924                                  cast<CastInst>(TrueVal)->getOperand(0), C,
3925                                  LHS, RHS);
3926    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
3927      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
3928                                  C, cast<CastInst>(FalseVal)->getOperand(0),
3929                                  LHS, RHS);
3930  }
3931  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
3932                              LHS, RHS);
3933}
3934
3935ConstantRange llvm::getConstantRangeFromMetadata(MDNode &Ranges) {
3936  const unsigned NumRanges = Ranges.getNumOperands() / 2;
3937  assert(NumRanges >= 1 && "Must have at least one range!");
3938  assert(Ranges.getNumOperands() % 2 == 0 && "Must be a sequence of pairs");
3939
3940  auto *FirstLow = mdconst::extract<ConstantInt>(Ranges.getOperand(0));
3941  auto *FirstHigh = mdconst::extract<ConstantInt>(Ranges.getOperand(1));
3942
3943  ConstantRange CR(FirstLow->getValue(), FirstHigh->getValue());
3944
3945  for (unsigned i = 1; i < NumRanges; ++i) {
3946    auto *Low = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
3947    auto *High = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
3948
3949    // Note: unionWith will potentially create a range that contains values not
3950    // contained in any of the original N ranges.
3951    CR = CR.unionWith(ConstantRange(Low->getValue(), High->getValue()));
3952  }
3953
3954  return CR;
3955}
3956
3957/// Return true if "icmp Pred LHS RHS" is always true.
3958static bool isTruePredicate(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
3959                            const DataLayout &DL, unsigned Depth,
3960                            AssumptionCache *AC, const Instruction *CxtI,
3961                            const DominatorTree *DT) {
3962  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
3963  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
3964    return true;
3965
3966  switch (Pred) {
3967  default:
3968    return false;
3969
3970  case CmpInst::ICMP_SLE: {
3971    const APInt *C;
3972
3973    // LHS s<= LHS +_{nsw} C   if C >= 0
3974    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
3975      return !C->isNegative();
3976    return false;
3977  }
3978
3979  case CmpInst::ICMP_ULE: {
3980    const APInt *C;
3981
3982    // LHS u<= LHS +_{nuw} C   for any C
3983    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
3984      return true;
3985
3986    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
3987    auto MatchNUWAddsToSameValue = [&](Value *A, Value *B, Value *&X,
3988                                       const APInt *&CA, const APInt *&CB) {
3989      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
3990          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
3991        return true;
3992
3993      // If X & C == 0 then (X | C) == X +_{nuw} C
3994      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
3995          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
3996        unsigned BitWidth = CA->getBitWidth();
3997        APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
3998        computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT);
3999
4000        if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB)
4001          return true;
4002      }
4003
4004      return false;
4005    };
4006
4007    Value *X;
4008    const APInt *CLHS, *CRHS;
4009    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
4010      return CLHS->ule(*CRHS);
4011
4012    return false;
4013  }
4014  }
4015}
4016
4017/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
4018/// ALHS ARHS" is true.  Otherwise, return None.
4019static Optional<bool>
4020isImpliedCondOperands(CmpInst::Predicate Pred, Value *ALHS, Value *ARHS,
4021                      Value *BLHS, Value *BRHS, const DataLayout &DL,
4022                      unsigned Depth, AssumptionCache *AC,
4023                      const Instruction *CxtI, const DominatorTree *DT) {
4024  switch (Pred) {
4025  default:
4026    return None;
4027
4028  case CmpInst::ICMP_SLT:
4029  case CmpInst::ICMP_SLE:
4030    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
4031                        DT) &&
4032        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
4033      return true;
4034    return None;
4035
4036  case CmpInst::ICMP_ULT:
4037  case CmpInst::ICMP_ULE:
4038    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
4039                        DT) &&
4040        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
4041      return true;
4042    return None;
4043  }
4044}
4045
4046/// Return true if the operands of the two compares match.  IsSwappedOps is true
4047/// when the operands match, but are swapped.
4048static bool isMatchingOps(Value *ALHS, Value *ARHS, Value *BLHS, Value *BRHS,
4049                          bool &IsSwappedOps) {
4050
4051  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
4052  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
4053  return IsMatchingOps || IsSwappedOps;
4054}
4055
4056/// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
4057/// true.  Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
4058/// BRHS" is false.  Otherwise, return None if we can't infer anything.
4059static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
4060                                                    Value *ALHS, Value *ARHS,
4061                                                    CmpInst::Predicate BPred,
4062                                                    Value *BLHS, Value *BRHS,
4063                                                    bool IsSwappedOps) {
4064  // Canonicalize the operands so they're matching.
4065  if (IsSwappedOps) {
4066    std::swap(BLHS, BRHS);
4067    BPred = ICmpInst::getSwappedPredicate(BPred);
4068  }
4069  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
4070    return true;
4071  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
4072    return false;
4073
4074  return None;
4075}
4076
4077/// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
4078/// true.  Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
4079/// C2" is false.  Otherwise, return None if we can't infer anything.
4080static Optional<bool>
4081isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, Value *ALHS,
4082                                 ConstantInt *C1, CmpInst::Predicate BPred,
4083                                 Value *BLHS, ConstantInt *C2) {
4084  assert(ALHS == BLHS && "LHS operands must match.");
4085  ConstantRange DomCR =
4086      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
4087  ConstantRange CR =
4088      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
4089  ConstantRange Intersection = DomCR.intersectWith(CR);
4090  ConstantRange Difference = DomCR.difference(CR);
4091  if (Intersection.isEmptySet())
4092    return false;
4093  if (Difference.isEmptySet())
4094    return true;
4095  return None;
4096}
4097
4098Optional<bool> llvm::isImpliedCondition(Value *LHS, Value *RHS,
4099                                        const DataLayout &DL, bool InvertAPred,
4100                                        unsigned Depth, AssumptionCache *AC,
4101                                        const Instruction *CxtI,
4102                                        const DominatorTree *DT) {
4103  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for example.
4104  if (LHS->getType() != RHS->getType())
4105    return None;
4106
4107  Type *OpTy = LHS->getType();
4108  assert(OpTy->getScalarType()->isIntegerTy(1));
4109
4110  // LHS ==> RHS by definition
4111  if (!InvertAPred && LHS == RHS)
4112    return true;
4113
4114  if (OpTy->isVectorTy())
4115    // TODO: extending the code below to handle vectors
4116    return None;
4117  assert(OpTy->isIntegerTy(1) && "implied by above");
4118
4119  ICmpInst::Predicate APred, BPred;
4120  Value *ALHS, *ARHS;
4121  Value *BLHS, *BRHS;
4122
4123  if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
4124      !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
4125    return None;
4126
4127  if (InvertAPred)
4128    APred = CmpInst::getInversePredicate(APred);
4129
4130  // Can we infer anything when the two compares have matching operands?
4131  bool IsSwappedOps;
4132  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
4133    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
4134            APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
4135      return Implication;
4136    // No amount of additional analysis will infer the second condition, so
4137    // early exit.
4138    return None;
4139  }
4140
4141  // Can we infer anything when the LHS operands match and the RHS operands are
4142  // constants (not necessarily matching)?
4143  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
4144    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
4145            APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
4146            cast<ConstantInt>(BRHS)))
4147      return Implication;
4148    // No amount of additional analysis will infer the second condition, so
4149    // early exit.
4150    return None;
4151  }
4152
4153  if (APred == BPred)
4154    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
4155                                 CxtI, DT);
4156
4157  return None;
4158}
4159