Analysis.cpp revision 5eb6d65a27fd77a0bf10bd49f5cccb9f1796d98b
//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities --*- C++ ------*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
unsigned llvm::ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Base case: we've reached a non-aggregate leaf type, which occupies
  // exactly one linearized slot.
  return CurIndex + 1;
}
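// Illustrative sketch (not compiled; the #if 0 keeps it out of the build).
// It shows how a caller might use ComputeLinearIndex to find the flattened
// slot named by an insertvalue/extractvalue index path. The helper name and
// the concrete aggregate type are hypothetical, not from this file.
#if 0
static unsigned ExampleLinearIndex(const TargetLowering &TLI,
                                   const StructType *STy) {
  // For the aggregate { i32, { float, double }, i8 }, the index path {1, 1}
  // names the double field.
  unsigned Indices[] = { 1, 1 };
  return ComputeLinearIndex(TLI, STy, Indices, Indices + 2, 0);
  // Returns 2: the i32 occupies slot 0, the float slot 1, the double slot 2.
}
#endif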
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
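// Illustrative sketch (not compiled). Flattening a small aggregate with
// ComputeValueVTs; the helper name is hypothetical, and the exact EVTs and
// offsets depend on the target's TargetData. The values in the comments
// assume a typical target where i32 and float are each 4 bytes.
#if 0
static void ExampleComputeValueVTs(const TargetLowering &TLI,
                                   const Type *Ty) {
  // Suppose Ty is { i32, [2 x float] }.
  SmallVector<EVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets, 0);
  // ValueVTs == { MVT::i32, MVT::f32, MVT::f32 }
  // Offsets  == { 0, 4, 8 }
}
#endif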
149/// 150ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) { 151 ISD::CondCode FPC, FOC; 152 switch (Pred) { 153 case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break; 154 case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break; 155 case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break; 156 case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break; 157 case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break; 158 case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break; 159 case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break; 160 case FCmpInst::FCMP_ORD: FOC = FPC = ISD::SETO; break; 161 case FCmpInst::FCMP_UNO: FOC = FPC = ISD::SETUO; break; 162 case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break; 163 case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break; 164 case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break; 165 case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break; 166 case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break; 167 case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break; 168 case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break; 169 default: 170 llvm_unreachable("Invalid FCmp predicate opcode!"); 171 FOC = FPC = ISD::SETFALSE; 172 break; 173 } 174 if (FiniteOnlyFPMath()) 175 return FOC; 176 else 177 return FPC; 178} 179 180/// getICmpCondCode - Return the ISD condition code corresponding to 181/// the given LLVM IR integer condition code. 182/// 183ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) { 184 switch (Pred) { 185 case ICmpInst::ICMP_EQ: return ISD::SETEQ; 186 case ICmpInst::ICMP_NE: return ISD::SETNE; 187 case ICmpInst::ICMP_SLE: return ISD::SETLE; 188 case ICmpInst::ICMP_ULE: return ISD::SETULE; 189 case ICmpInst::ICMP_SGE: return ISD::SETGE; 190 case ICmpInst::ICMP_UGE: return ISD::SETUGE; 191 case ICmpInst::ICMP_SLT: return ISD::SETLT; 192 case ICmpInst::ICMP_ULT: return ISD::SETULT; 193 case ICmpInst::ICMP_SGT: return ISD::SETGT; 194 case ICmpInst::ICMP_UGT: return ISD::SETUGT; 195 default: 196 llvm_unreachable("Invalid ICmp predicate opcode!"); 197 return ISD::SETNE; 198 } 199} 200 201/// Test if the given instruction is in a position to be optimized 202/// with a tail-call. This roughly means that it's in a block with 203/// a return and there's nothing that needs to be scheduled 204/// between it and the return. 205/// 206/// This function only tests target-independent requirements. 207bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr, 208 const TargetLowering &TLI) { 209 const Instruction *I = CS.getInstruction(); 210 const BasicBlock *ExitBB = I->getParent(); 211 const TerminatorInst *Term = ExitBB->getTerminator(); 212 const ReturnInst *Ret = dyn_cast<ReturnInst>(Term); 213 const Function *F = ExitBB->getParent(); 214 215 // The block must end in a return statement or unreachable. 216 // 217 // FIXME: Decline tailcall if it's not guaranteed and if the block ends in 218 // an unreachable, for now. The way tailcall optimization is currently 219 // implemented means it will add an epilogue followed by a jump. That is 220 // not profitable. Also, if the callee is a special function (e.g. 221 // longjmp on x86), it can end up causing miscompilation that has not 222 // been fully understood. 
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
  const Function *F = ExitBB->getParent();

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !I->isSafeToSpeculativelyExecute())
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !BBI->isSafeToSpeculativelyExecute())
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Otherwise, make sure the unmodified return value of I is the return value.
  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
       U = dyn_cast<Instruction>(U->getOperand(0))) {
    if (!U)
      return false;
    if (!U->hasOneUse())
      return false;
    if (U == I)
      break;
    // Check for a truly no-op truncate.
    if (isa<TruncInst>(U) &&
        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
      continue;
    // Check for a truly no-op bitcast.
    if (isa<BitCastInst>(U) &&
        (U->getOperand(0)->getType() == U->getType() ||
         (U->getOperand(0)->getType()->isPointerTy() &&
          U->getType()->isPointerTy())))
      continue;
    // Otherwise it's not a true no-op.
    return false;
  }

  return true;
}
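// Illustrative sketch (not compiled). A caller deciding whether a call
// marked 'tail' may actually be lowered as a tail call would combine this
// target-independent check with target-specific ones; the helper name and
// surrounding control flow here are hypothetical.
#if 0
static bool MayLowerAsTailCall(const CallInst *CI, const TargetLowering &TLI) {
  ImmutableCallSite CS(CI);
  return CI->isTailCall() &&
         isInTailCallPosition(CS, CS.getAttributes().getRetAttributes(), TLI);
}
#endif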