Analysis.cpp revision f0426601977c3e386d2d26c72a2cca691dc42072
//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
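
// Editorial usage sketch (not part of the original file). Assuming a
// TargetLowering reference `TLI` and an aggregate `Type *Ty` are in scope,
// a caller typically decomposes a value like this:
//
//   SmallVector<EVT, 4> ValueVTs;
//   SmallVector<uint64_t, 4> Offsets;
//   ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
//
// ValueVTs then holds one EVT per non-aggregate leaf of Ty, visited in the
// same depth-first order that ComputeLinearIndex uses for its linearized
// indices, and Offsets holds each leaf's byte offset within Ty.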

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);

  if (GV && GV->getName() == "llvm.eh.catch.all.value") {
    assert(GV->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = GV->getInitializer();
    GV = dyn_cast<GlobalVariable>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType
        CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}
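
// Editorial usage sketch (not part of the original file). Assuming an
// `InlineAsm *IA` and a TargetLowering reference `TLI` are in scope:
//
//   InlineAsm::ConstraintInfoVector CInfos = IA->ParseConstraints();
//   if (hasInlineAsmMemConstraint(CInfos, TLI)) {
//     // The asm may access memory, either through an explicit 'm'
//     // constraint or through an indirect operand, so it must be
//     // ordered with other memory operations via a chain dependency.
//   }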

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. Global floating-point
/// math flags are not consulted here; callers that may assume NaN-free
/// operands can relax the result with getFCmpCodeWithoutNaN.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: break;
  }
  llvm_unreachable("Invalid FCmp predicate opcode!");
  return ISD::SETFALSE;
}

/// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats,
/// return the equivalent code if we're allowed to assume that NaNs won't
/// occur (i.e. the ordered/unordered distinction collapses).
ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: break;
  }
  return CC;
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
    return ISD::SETNE;
  }
}
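
// Editorial usage sketch (not part of the original file). This mirrors how
// an fcmp is typically lowered: map the IR predicate first, then collapse
// the ordered/unordered distinction when the no-NaNs option is in effect
// (assuming a TargetMachine reference `TM` is in scope):
//
//   ISD::CondCode Cond = getFCmpCondCode(FCmpInst::FCMP_OLT); // ISD::SETOLT
//   if (TM.Options.NoNaNsFPMath)
//     Cond = getFCmpCodeWithoutNaN(Cond);                     // ISD::SETLT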

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TLI.getTargetMachine().Options.GuaranteedTailCallOpt ||
       !isa<UnreachableInst>(Term))) return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(BBI))
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  const Function *F = ExitBB->getParent();
  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Otherwise, make sure the unmodified return value of I is the return value.
  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
       U = dyn_cast<Instruction>(U->getOperand(0))) {
    if (!U)
      return false;
    if (!U->hasOneUse())
      return false;
    if (U == I)
      break;
    // Check for a truly no-op truncate.
    if (isa<TruncInst>(U) &&
        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
      continue;
    // Check for a truly no-op bitcast.
    if (isa<BitCastInst>(U) &&
        (U->getOperand(0)->getType() == U->getType() ||
         (U->getOperand(0)->getType()->isPointerTy() &&
          U->getType()->isPointerTy())))
      continue;
    // Otherwise it's not a true no-op.
    return false;
  }

  return true;
}

bool llvm::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                const TargetLowering &TLI) {
  const Function *F = DAG.getMachineFunction().getFunction();

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
  if (CallerRetAttr & ~Attribute::NoAlias)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return TLI.isUsedByReturnOnly(Node);
}
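
// Editorial usage sketch (not part of the original file). Call lowering
// might consult the CallSite overload roughly like this, where `CI` is a
// hypothetical CallInst being lowered and `TLI` is the target's lowering
// info:
//
//   ImmutableCallSite CS(CI);
//   bool CanTailCall =
//     CI->isTailCall() &&
//     isInTailCallPosition(CS, CS.getAttributes().getRetAttributes(), TLI);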