Analysis.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
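/// For example (an illustrative sketch, not from the original source), given
/// an aggregate type Ty of the form {i32, {float, float}, i8}, the scalar
/// members linearize to indices 0 through 3, and the member indices {1, 1}
/// (the second float) yield linear index 2:
///
///   unsigned Idx[] = {1, 1};
///   unsigned Linear = ComputeLinearIndex(Ty, Idx, Idx + 2, 0); // == 2
///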
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
        EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Base case: we've reached a non-aggregate type; it occupies one slot.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
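/// For example (a sketch, assuming a target where i32 and float are legal
/// 4-byte types), the struct {i32, [2 x float]} would produce
/// ValueVTs = {MVT::i32, MVT::f32, MVT::f32} and, if Offsets is non-null,
/// Offsets = {0, 4, 8}.
///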
void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);

  if (GV && GV->getName() == "llvm.eh.catch.all.value") {
    assert(GV->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = GV->getInitializer();
    GV = dyn_cast<GlobalVariable>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
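///
/// For example (illustrative IR), an inline asm with an indirect operand,
/// such as
///   call void asm "movl %eax, $0", "=*m"(i32* %p)
/// accesses memory, so this function would return true for it.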
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code.  This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase &TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}
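// For example (a sketch): a bitcast between two pointer types, or between
// <4 x i32> and <2 x i64> when both vector types are legal on the target,
// generates no code and is therefore treated as a no-op above.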

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
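///
/// For example (an illustrative sketch), tracing from %cast in
///   %agg = call { i8*, i32 } @f()
///   %ptr = extractvalue { i8*, i32 } %agg, 0
///   %cast = bitcast i8* %ptr to i32*
/// looks through the no-op bitcast and the extractvalue, returns %agg, and
/// appends index 0 to ValLoc to record which component of %agg is relevant.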
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = 0;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr.
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
          cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
          cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (isa<CallInst>(I)) {
      // Look through call (skipping callee).
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 1;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (isa<InvokeInst>(I)) {
      // Look through invoke (skipping BB, BB, Callee).
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 3;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (std::equal(InsertLoc.rbegin(), InsertLoc.rend(),
                     ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      std::copy(ExtractLoc.rbegin(), ExtractLoc.rend(),
                std::back_inserter(ValLoc));
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
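///
/// For example (illustrative IR, assuming the target allows the truncation
/// for tail calls):
///   %val = tail call i64 @g()
///   %trunc = trunc i64 %val to i32
///   ret i32 %trunc
/// traces %trunc back to %val with BitsRequired = 32; nothing on the call
/// side shrinks BitsProvided, so the slot is compatible whenever
/// AllowDifferingSizes permits the size difference.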
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate type.
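///
/// For example (a sketch), when iterating over {i8, {i16, i32}} from the
/// leaf at Path = [0] (the i8), one call advances to Path = [1, 0], pushing
/// the inner struct onto SubTypes, so the iterator then represents the i16.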
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and set up the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would set
/// up Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the
/// first i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or an empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}


/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
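///
/// For example (illustrative IR), in
///   %ret = tail call i32 @callee()
///   ret i32 %ret
/// the call is in tail call position; an intervening instruction with side
/// effects between the call and the ret would disqualify it.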
bool llvm::isInTailCallPosition(ImmutableCallSite CS,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TLI.getTargetMachine().Options.GuaranteedTailCallOpt ||
       !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(BBI))
        return false;
    }

  return returnTypeIsEligibleForTailCall(ExitBB->getParent(), I, Ret, TLI);
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  AttrBuilder CallerAttrs(F->getAttributes(),
                          AttributeSet::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeSet::ReturnIndex);

  // Noalias is completely benign as far as calling convention goes; it
  // shouldn't affect whether the call is a tail call.
  CallerAttrs = CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs = CalleeAttrs.removeAttribute(Attribute::NoAlias);

  bool AllowDifferingSizes = true;
  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }
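  // For example (illustrative): if the caller's return is marked zeroext,
  // the callee's return must be zeroext as well, and AllowDifferingSizes is
  // cleared so that a later truncate between the call and the ret will
  // reject the tail call.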

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  if (CallerAttrs != CalleeAttrs)
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned; it doesn't matter what the callee put there,
  // it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    using std::copy;
    SmallVector<unsigned, 4> TmpRetPath, TmpCallPath;
    copy(RetPath.rbegin(), RetPath.rend(), std::back_inserter(TmpRetPath));
    copy(CallPath.rbegin(), CallPath.rend(), std::back_inserter(TmpCallPath));

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}
609