//===-- FunctionLoweringInfo.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "function-lowering-info"

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
  if (I->use_empty()) return false;
  if (isa<PHINode>(I)) return true;
  const BasicBlock *BB = I->getParent();
  for (const User *U : I->users())
    if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
      return true;

  return false;
}

static ISD::NodeType getPreferredExtendForValue(const Value *V) {
  // If the value is used by compare instructions and the signed predicates
  // outnumber the unsigned ones, prefer SIGN_EXTEND.
  //
  // This avoids some redundant sign or zero extension instructions, which
  // eventually exposes more machine CSE opportunities.
  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
  unsigned NumOfSigned = 0, NumOfUnsigned = 0;
  for (const User *U : V->users()) {
    if (const auto *CI = dyn_cast<CmpInst>(U)) {
      NumOfSigned += CI->isSigned();
      NumOfUnsigned += CI->isUnsigned();
    }
  }
  if (NumOfSigned > NumOfUnsigned)
    ExtendKind = ISD::SIGN_EXTEND;

  return ExtendKind;
}

void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
                               SelectionDAG *DAG) {
  Fn = &fn;
  MF = &mf;
  TLI = MF->getSubtarget().getTargetLowering();
  RegInfo = &MF->getRegInfo();
  MachineModuleInfo &MMI = MF->getMMI();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  unsigned StackAlign = TFI->getStackAlignment();

  // Check whether the function can return without sret-demotion.
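  // If it cannot (the return value does not fit in the registers the calling
  // convention provides), the return value is demoted to a hidden sret
  // pointer argument during lowering.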
  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
                mf.getDataLayout());
  CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
                                       Fn->isVarArg(), Outs, Fn->getContext());

  // If this personality uses funclets, we need to do a bit more work.
  DenseMap<const AllocaInst *, int *> CatchObjects;
  EHPersonality Personality = classifyEHPersonality(
      Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr);
  if (isFuncletEHPersonality(Personality)) {
    // Calculate state numbers if we haven't already.
    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
    if (Personality == EHPersonality::MSVC_CXX)
      calculateWinCXXEHStateNumbers(&fn, EHInfo);
    else if (isAsynchronousEHPersonality(Personality))
      calculateSEHStateNumbers(&fn, EHInfo);
    else if (Personality == EHPersonality::CoreCLR)
      calculateClrEHStateNumbers(&fn, EHInfo);

    // Record the catch objects referenced by the WinEH handler data so their
    // frame indices can be filled in once the allocas are laid out below.
    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        if (const AllocaInst *AI = H.CatchObj.Alloca)
          CatchObjects.insert({AI, &H.CatchObj.FrameIndex});
        else
          H.CatchObj.FrameIndex = INT_MAX;
      }
    }
  }

  // Initialize the mapping of values to registers.  This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::const_iterator BB = Fn->begin(), EB = Fn->end();
  for (; BB != EB; ++BB)
    for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
        Type *Ty = AI->getAllocatedType();
        unsigned Align =
            std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(Ty),
                     AI->getAlignment());

        // Static allocas can be folded into the initial stack frame
        // adjustment. For targets that don't realign the stack, don't
        // do this if there is an extra alignment requirement.
        if (AI->isStaticAlloca() &&
            (TFI->isStackRealignable() || (Align <= StackAlign))) {
          const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
          uint64_t TySize = MF->getDataLayout().getTypeAllocSize(Ty);

          TySize *= CUI->getZExtValue();   // Get total allocated size.
          if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
          int FrameIndex = INT_MAX;
          auto Iter = CatchObjects.find(AI);
          if (Iter != CatchObjects.end() && TLI->needsFixedCatchObjects()) {
            FrameIndex = MF->getFrameInfo()->CreateFixedObject(
                TySize, 0, /*Immutable=*/false, /*isAliased=*/true);
            MF->getFrameInfo()->setObjectAlignment(FrameIndex, Align);
          } else {
            FrameIndex =
                MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
          }

          StaticAllocaMap[AI] = FrameIndex;
          // Update the catch handler information.
          if (Iter != CatchObjects.end())
            *Iter->second = FrameIndex;
        } else {
          // FIXME: Overaligned static allocas should be grouped into
          // a single dynamic allocation instead of using a separate
          // stack allocation for each one.
          if (Align <= StackAlign)
            Align = 0;
          // Inform the Frame Information that we have variable-sized objects.
          MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1, AI);
        }
      }

      // Look for inline asm that clobbers the SP register.
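      // If such asm can modify SP, stack-pointer-relative frame addressing is
      // no longer reliable, so the adjustment is recorded as opaque (which
      // generally forces the target to address the frame via a frame pointer).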
      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        ImmutableCallSite CS(&*I);
        if (isa<InlineAsm>(CS.getCalledValue())) {
          unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
          const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
          std::vector<TargetLowering::AsmOperandInfo> Ops =
              TLI->ParseConstraints(Fn->getParent()->getDataLayout(), TRI, CS);
          for (size_t I = 0, E = Ops.size(); I != E; ++I) {
            TargetLowering::AsmOperandInfo &Op = Ops[I];
            if (Op.Type == InlineAsm::isClobber) {
              // Clobbers don't have SDValue operands, hence SDValue().
              TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
              std::pair<unsigned, const TargetRegisterClass *> PhysReg =
                  TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
                                                    Op.ConstraintVT);
              if (PhysReg.first == SP)
                MF->getFrameInfo()->setHasOpaqueSPAdjustment(true);
            }
          }
        }
      }

      // Look for calls to the @llvm.va_start intrinsic. We can omit some
      // prologue boilerplate for variadic functions that don't examine their
      // arguments.
      if (const auto *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::vastart)
          MF->getFrameInfo()->setHasVAStart(true);
      }

      // If we have a musttail call in a variadic function, we need to ensure
      // we forward implicit register parameters.
      if (const auto *CI = dyn_cast<CallInst>(I)) {
        if (CI->isMustTailCall() && Fn->isVarArg())
          MF->getFrameInfo()->setHasMustTailInVarArgFunc(true);
      }

      // Mark values used outside their block as exported, by allocating
      // a virtual register for them.
      if (isUsedOutsideOfDefiningBlock(&*I))
        if (!isa<AllocaInst>(I) || !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(&*I);

      // Collect llvm.dbg.declare information. This is done now instead of
      // during the initial isel pass through the IR so that it is done
      // in a predictable order.
      if (const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(I)) {
        assert(DI->getVariable() && "Missing variable");
        assert(DI->getDebugLoc() && "Missing location");
        if (MMI.hasDebugInfo()) {
          // Don't handle byval struct arguments or VLAs, for example.
          // Non-byval arguments are handled here (they refer to the stack
          // temporary alloca at this point).
          const Value *Address = DI->getAddress();
          if (Address) {
            if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
              Address = BCI->getOperand(0);
            if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
              DenseMap<const AllocaInst *, int>::iterator SI =
                  StaticAllocaMap.find(AI);
              if (SI != StaticAllocaMap.end()) { // Check for VLAs.
                int FI = SI->second;
                MMI.setVariableDbgInfo(DI->getVariable(), DI->getExpression(),
                                       FI, DI->getDebugLoc());
              }
            }
          }
        }
      }

      // Decide the preferred extend type for a value.
      PreferredExtendType[&*I] = getPreferredExtendForValue(&*I);
    }

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn->begin(); BB != EB; ++BB) {
    // Don't create MachineBasicBlocks for imaginary EH pad blocks. These
    // blocks are really data, and no instructions can live here.
    if (BB->isEHPad()) {
      const Instruction *I = BB->getFirstNonPHI();
      // If this is a non-landingpad EH pad, mark this function as using
      // funclets.
      // FIXME: SEH catchpads do not create funclets, so we could avoid setting
      // this in such cases in order to improve frame layout.
      if (!isa<LandingPadInst>(I)) {
        MMI.setHasEHFunclets(true);
        MF->getFrameInfo()->setHasOpaqueSPAdjustment(true);
      }
      if (isa<CatchSwitchInst>(I)) {
        assert(&*BB->begin() == I &&
               "WinEHPrepare failed to remove PHIs from imaginary BBs");
        continue;
      }
      if (isa<FuncletPadInst>(I))
        assert(&*BB->begin() == I && "WinEHPrepare failed to demote PHIs");
    }

    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(&*BB);
    MBBMap[&*BB] = MBB;
    MF->push_back(MBB);

    // Transfer the address-taken flag. This is necessary because there could
    // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
    // the first one should be marked.
    if (BB->hasAddressTaken())
      MBB->setHasAddressTaken();

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    for (BasicBlock::const_iterator I = BB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (PN->use_empty()) continue;

      // Skip empty types.
      if (PN->getType()->isEmptyTy())
        continue;

      DebugLoc DL = PN->getDebugLoc();
      unsigned PHIReg = ValueMap[PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(*TLI, MF->getDataLayout(), PN->getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        EVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
        PHIReg += NumRegisters;
      }
    }
  }

  // Mark landing pad blocks.
  SmallVector<const LandingPadInst *, 4> LPads;
  for (BB = Fn->begin(); BB != EB; ++BB) {
    const Instruction *FNP = BB->getFirstNonPHI();
    if (BB->isEHPad() && MBBMap.count(&*BB))
      MBBMap[&*BB]->setIsEHPad();
    if (const auto *LPI = dyn_cast<LandingPadInst>(FNP))
      LPads.push_back(LPI);
  }

  if (!isFuncletEHPersonality(Personality))
    return;

  WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();

  // Map all BB references in the WinEH data to MBBs.
  for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
    for (WinEHHandlerType &H : TBME.HandlerArray) {
      if (H.Handler)
        H.Handler = MBBMap[H.Handler.get<const BasicBlock *>()];
    }
  }
  for (CxxUnwindMapEntry &UME : EHInfo.CxxUnwindMap)
    if (UME.Cleanup)
      UME.Cleanup = MBBMap[UME.Cleanup.get<const BasicBlock *>()];
  for (SEHUnwindMapEntry &UME : EHInfo.SEHUnwindMap) {
    const BasicBlock *BB = UME.Handler.get<const BasicBlock *>();
    UME.Handler = MBBMap[BB];
  }
  for (ClrEHUnwindMapEntry &CME : EHInfo.ClrEHUnwindMap) {
    const BasicBlock *BB = CME.Handler.get<const BasicBlock *>();
    CME.Handler = MBBMap[BB];
  }
}

/// clear - Clear out all the function-specific state.  This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void FunctionLoweringInfo::clear() {
  MBBMap.clear();
  ValueMap.clear();
  StaticAllocaMap.clear();
  LiveOutRegInfo.clear();
  VisitedBBs.clear();
  ArgDbgValues.clear();
  ByValArgFrameIndexMap.clear();
  RegFixups.clear();
  StatepointStackSlots.clear();
  StatepointSpillMaps.clear();
  PreferredExtendType.clear();
}

/// CreateReg - Allocate a single virtual register for the given type.
unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
  return RegInfo->createVirtualRegister(
      MF->getSubtarget().getTargetLowering()->getRegClassFor(VT));
}

/// CreateRegs - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types.  Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
  const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();

  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);

  unsigned FirstReg = 0;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);

    unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      unsigned R = CreateReg(RegisterVT);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}

/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
/// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
/// the register's LiveOutInfo is for a smaller bit width, it is extended to
/// the larger bit width by zero extension. The bit width must be no smaller
/// than the LiveOutInfo's existing bit width.
const FunctionLoweringInfo::LiveOutInfo *
FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
  if (!LiveOutRegInfo.inBounds(Reg))
    return nullptr;

  LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
  if (!LOI->IsValid)
    return nullptr;

  if (BitWidth > LOI->KnownZero.getBitWidth()) {
    LOI->NumSignBits = 1;
    LOI->KnownZero = LOI->KnownZero.zextOrTrunc(BitWidth);
    LOI->KnownOne = LOI->KnownOne.zextOrTrunc(BitWidth);
  }

  return LOI;
}

/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
  Type *Ty = PN->getType();
  if (!Ty->isIntegerTy() || Ty->isVectorTy())
    return;

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
  assert(ValueVTs.size() == 1 &&
         "PHIs with non-vector integer types should have a single VT.");
  EVT IntVT = ValueVTs[0];

  if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
    return;
  IntVT = TLI->getTypeToTransformTo(PN->getContext(), IntVT);
  unsigned BitWidth = IntVT.getSizeInBits();

  unsigned DestReg = ValueMap[PN];
  if (!TargetRegisterInfo::isVirtualRegister(DestReg))
    return;
  LiveOutRegInfo.grow(DestReg);
  LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];

  Value *V = PN->getIncomingValue(0);
  if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
    DestLOI.NumSignBits = 1;
    APInt Zero(BitWidth, 0);
    DestLOI.KnownZero = Zero;
    DestLOI.KnownOne = Zero;
    return;
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    APInt Val = CI->getValue().zextOrTrunc(BitWidth);
    DestLOI.NumSignBits = Val.getNumSignBits();
    DestLOI.KnownZero = ~Val;
    DestLOI.KnownOne = Val;
  } else {
    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    unsigned SrcReg = ValueMap[V];
    if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI = *SrcLOI;
  }

  assert(DestLOI.KnownZero.getBitWidth() == BitWidth &&
         DestLOI.KnownOne.getBitWidth() == BitWidth &&
         "Masks should have the same bit width as the type.");

  for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
      DestLOI.NumSignBits = 1;
      APInt Zero(BitWidth, 0);
      DestLOI.KnownZero = Zero;
      DestLOI.KnownOne = Zero;
      return;
    }

    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      APInt Val = CI->getValue().zextOrTrunc(BitWidth);
      DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
      DestLOI.KnownZero &= ~Val;
      DestLOI.KnownOne &= Val;
      continue;
    }

    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    unsigned SrcReg = ValueMap[V];
    if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
    DestLOI.KnownZero &= SrcLOI->KnownZero;
    DestLOI.KnownOne &= SrcLOI->KnownOne;
  }
}

/// setArgumentFrameIndex - Record frame index for the byval
/// argument. This overrides any previous frame index entry for this
/// argument.
void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
                                                 int FI) {
  ByValArgFrameIndexMap[A] = FI;
}

/// getArgumentFrameIndex - Get frame index for the byval argument.
/// If the argument does not have any assigned frame index then 0 is
/// returned.
int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
  DenseMap<const Argument *, int>::iterator I =
      ByValArgFrameIndexMap.find(A);
  if (I != ByValArgFrameIndexMap.end())
    return I->second;
  DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
  return 0;
}

unsigned FunctionLoweringInfo::getCatchPadExceptionPointerVReg(
    const Value *CPI, const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = MF->getRegInfo();
  auto I = CatchPadExceptionPointers.insert({CPI, 0});
  unsigned &VReg = I.first->second;
  if (I.second)
    VReg = MRI.createVirtualRegister(RC);
  assert(VReg && "null vreg in exception pointer table!");
  return VReg;
}

/// ComputeUsesVAFloatArgument - Determine if any floating-point values are
/// being passed to this variadic function, and set the MachineModuleInfo's
/// usesVAFloatArgument flag if so. This flag is used to emit an undefined
/// reference to _fltused on Windows, which will link in MSVCRT's
/// floating-point support.
void llvm::ComputeUsesVAFloatArgument(const CallInst &I,
                                      MachineModuleInfo *MMI) {
  FunctionType *FT = cast<FunctionType>(
      I.getCalledValue()->getType()->getContainedType(0));
  if (FT->isVarArg() && !MMI->usesVAFloatArgument()) {
    for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
      Type *T = I.getArgOperand(i)->getType();
      for (auto i : post_order(T)) {
        if (i->isFloatingPointTy()) {
          MMI->setUsesVAFloatArgument(true);
          return;
        }
      }
    }
  }
}

/// AddLandingPadInfo - Extract the exception handling information from the
/// landingpad instruction and add it to the specified machine module info.
void llvm::AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
                             MachineBasicBlock *MBB) {
  if (const auto *PF = dyn_cast<Function>(
          I.getParent()->getParent()->getPersonalityFn()->stripPointerCasts()))
    MMI.addPersonality(PF);

  if (I.isCleanup())
    MMI.addCleanup(MBB);

  // FIXME: New EH - Add the clauses in reverse order. This isn't 100% correct,
  // but we need to do it this way because of how the DWARF EH emitter
  // processes the clauses.
  for (unsigned i = I.getNumClauses(); i != 0; --i) {
    Value *Val = I.getClause(i - 1);
    if (I.isCatch(i - 1)) {
      MMI.addCatchTypeInfo(MBB,
                           dyn_cast<GlobalValue>(Val->stripPointerCasts()));
    } else {
      // Add filters in a list.
      Constant *CVal = cast<Constant>(Val);
      SmallVector<const GlobalValue *, 4> FilterList;
      for (User::op_iterator
             II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II)
        FilterList.push_back(cast<GlobalValue>((*II)->stripPointerCasts()));

      MMI.addFilterTypeInfo(MBB, FilterList);
    }
  }
}

unsigned FunctionLoweringInfo::findSwiftErrorVReg(const MachineBasicBlock *MBB,
                                                  const Value *Val) const {
  // Find the index in SwiftErrorVals.
  SwiftErrorValues::const_iterator I =
      std::find(SwiftErrorVals.begin(), SwiftErrorVals.end(), Val);
  assert(I != SwiftErrorVals.end() && "Can't find value in SwiftErrorVals");
  return SwiftErrorMap.lookup(MBB)[I - SwiftErrorVals.begin()];
}

void FunctionLoweringInfo::setSwiftErrorVReg(const MachineBasicBlock *MBB,
                                             const Value *Val, unsigned VReg) {
  // Find the index in SwiftErrorVals.
  SwiftErrorValues::iterator I =
      std::find(SwiftErrorVals.begin(), SwiftErrorVals.end(), Val);
  assert(I != SwiftErrorVals.end() && "Can't find value in SwiftErrorVals");
  SwiftErrorMap[MBB][I - SwiftErrorVals.begin()] = VReg;
}