FunctionLoweringInfo.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
//===-- FunctionLoweringInfo.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "function-lowering-info"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm>
using namespace llvm;

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
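///
/// For example, in IR like the following (hypothetical snippet):
///   bb0:  %x = add i32 %a, %b
///         br label %bb1
///   bb1:  %y = mul i32 %x, 2
/// %x is used outside of bb0, so it must be exported through a virtual
/// register rather than staying a block-local SelectionDAG value.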
static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
  if (I->use_empty()) return false;
  if (isa<PHINode>(I)) return true;
  const BasicBlock *BB = I->getParent();
  for (const User *U : I->users())
    if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
      return true;

  return false;
}

void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
                               SelectionDAG *DAG) {
  const TargetLowering *TLI = TM.getTargetLowering();

  Fn = &fn;
  MF = &mf;
  RegInfo = &MF->getRegInfo();

  // Check whether the function can return without sret-demotion.
  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI);
  CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
                                       Fn->isVarArg(),
                                       Outs, Fn->getContext());
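  // Note: when CanLowerReturn is false, later lowering demotes the return
  // value to an implicit sret-style out-parameter (the "sret demotion"
  // mentioned above); nothing more needs to happen here.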

  // Initialize the mapping of values to registers.  This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::const_iterator BB = Fn->begin(), EB = Fn->end();
  for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
      // Don't fold inalloca allocas or other dynamic allocas into the initial
      // stack frame allocation, even if they are in the entry block.
      if (!AI->isStaticAlloca())
        continue;

      if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
                   AI->getAlignment());

        TySize *= CUI->getZExtValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.

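        // The frame index created here is recorded in StaticAllocaMap so that
        // SelectionDAG can refer to this alloca directly as a FrameIndex node
        // instead of materializing a pointer at run time.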
        StaticAllocaMap[AI] =
          MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
      }
    }

  for (; BB != EB; ++BB)
    for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      // Look for dynamic allocas.
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
        if (!AI->isStaticAlloca()) {
          unsigned Align = std::max(
              (unsigned)TLI->getDataLayout()->getPrefTypeAlignment(
                AI->getAllocatedType()),
              AI->getAlignment());
          unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
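          // An alignment no greater than the target's stack alignment needs no
          // dynamic realignment, so it is recorded as the minimum below; only
          // over-aligned dynamic allocas keep their requested alignment.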
          if (Align <= StackAlign)
            Align = 0;
          // Inform the Frame Information that we have variable-sized objects.
          MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1, AI);
        }
      }

      // Look for inline asm that clobbers the SP register.
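      // If such a clobber is found, MachineFrameInfo is flagged so that target
      // frame lowering can be conservative (e.g. keep a frame pointer), since
      // the inline asm may adjust the stack pointer behind the compiler's back.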
      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        ImmutableCallSite CS(I);
        if (isa<InlineAsm>(CS.getCalledValue())) {
          unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
          std::vector<TargetLowering::AsmOperandInfo> Ops =
            TLI->ParseConstraints(CS);
          for (size_t I = 0, E = Ops.size(); I != E; ++I) {
            TargetLowering::AsmOperandInfo &Op = Ops[I];
            if (Op.Type == InlineAsm::isClobber) {
              // Clobbers don't have SDValue operands, hence SDValue().
              TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
              std::pair<unsigned, const TargetRegisterClass*> PhysReg =
                TLI->getRegForInlineAsmConstraint(Op.ConstraintCode,
                                                  Op.ConstraintVT);
              if (PhysReg.first == SP)
                MF->getFrameInfo()->setHasInlineAsmWithSPAdjust(true);
            }
          }
        }
      }

      // Mark values used outside their block as exported, by allocating
      // a virtual register for them.
      if (isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

      // Collect llvm.dbg.declare information. This is done now instead of
      // during the initial isel pass through the IR so that it is done
      // in a predictable order.
      if (const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(I)) {
        MachineModuleInfo &MMI = MF->getMMI();
        DIVariable DIVar(DI->getVariable());
        assert((!DIVar || DIVar.isVariable()) &&
          "Variable in DbgDeclareInst should be either null or a DIVariable.");
        if (MMI.hasDebugInfo() &&
            DIVar &&
            !DI->getDebugLoc().isUnknown()) {
          // Don't handle byval struct arguments or VLAs, for example.
          // Non-byval arguments are handled here (they refer to the stack
          // temporary alloca at this point).
          const Value *Address = DI->getAddress();
          if (Address) {
            if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
              Address = BCI->getOperand(0);
            if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
              DenseMap<const AllocaInst *, int>::iterator SI =
                StaticAllocaMap.find(AI);
              if (SI != StaticAllocaMap.end()) { // Check for VLAs.
                int FI = SI->second;
                MMI.setVariableDbgInfo(DI->getVariable(),
                                       FI, DI->getDebugLoc());
              }
            }
          }
        }
      }
    }

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn->begin(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF->push_back(MBB);

    // Transfer the address-taken flag. This is necessary because there could
    // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
    // the first one should be marked.
    if (BB->hasAddressTaken())
      MBB->setHasAddressTaken();

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    for (BasicBlock::const_iterator I = BB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (PN->use_empty()) continue;

      // Skip empty types
      if (PN->getType()->isEmptyTy())
        continue;

      DebugLoc DL = PN->getDebugLoc();
      unsigned PHIReg = ValueMap[PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(*TLI, PN->getType(), ValueVTs);
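      // A single IR PHI may expand into several machine PHIs, one for each
      // legal register piece of its type (for example, an i64 PHI on a 32-bit
      // target typically becomes two i32 machine PHIs), so one PHI is built
      // per register piece starting at PHIReg.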
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        EVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
        PHIReg += NumRegisters;
      }
    }
  }

  // Mark landing pad blocks.
  for (BB = Fn->begin(); BB != EB; ++BB)
    if (const InvokeInst *Invoke = dyn_cast<InvokeInst>(BB->getTerminator()))
      MBBMap[Invoke->getSuccessor(1)]->setIsLandingPad();
}

/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void FunctionLoweringInfo::clear() {
  assert(CatchInfoFound.size() == CatchInfoLost.size() &&
         "Not all catch info was assigned to a landing pad!");

  MBBMap.clear();
  ValueMap.clear();
  StaticAllocaMap.clear();
#ifndef NDEBUG
  CatchInfoLost.clear();
  CatchInfoFound.clear();
#endif
  LiveOutRegInfo.clear();
  VisitedBBs.clear();
  ArgDbgValues.clear();
  ByValArgFrameIndexMap.clear();
  RegFixups.clear();
}

/// CreateReg - Allocate a single virtual register for the given type.
unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
  return RegInfo->
    createVirtualRegister(TM.getTargetLowering()->getRegClassFor(VT));
}

/// CreateRegs - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types.  Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
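/// For example, on a typical 32-bit target a value of type {i64, float} would
/// receive three consecutive virtual registers: two i32 registers for the i64
/// member and one f32 register for the float member (exact register classes
/// depend on the target).
///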
unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
  const TargetLowering *TLI = TM.getTargetLowering();

  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(*TLI, Ty, ValueVTs);

  unsigned FirstReg = 0;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);

    unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      unsigned R = CreateReg(RegisterVT);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}

/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
/// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
/// the register's LiveOutInfo is for a smaller bit width, it is extended to
/// the larger bit width by zero extension. The bit width must be no smaller
/// than the LiveOutInfo's existing bit width.
const FunctionLoweringInfo::LiveOutInfo *
FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
  if (!LiveOutRegInfo.inBounds(Reg))
    return NULL;

  LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
  if (!LOI->IsValid)
    return NULL;

  if (BitWidth > LOI->KnownZero.getBitWidth()) {
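    // Widen conservatively: treat the new high bits as unknown and drop
    // sign-bit information, which keeps the returned info sound even if it is
    // not maximally precise.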
    LOI->NumSignBits = 1;
    LOI->KnownZero = LOI->KnownZero.zextOrTrunc(BitWidth);
    LOI->KnownOne = LOI->KnownOne.zextOrTrunc(BitWidth);
  }

  return LOI;
}

/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
  Type *Ty = PN->getType();
  if (!Ty->isIntegerTy() || Ty->isVectorTy())
    return;

  const TargetLowering *TLI = TM.getTargetLowering();

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, Ty, ValueVTs);
  assert(ValueVTs.size() == 1 &&
         "PHIs with non-vector integer types should have a single VT.");
  EVT IntVT = ValueVTs[0];

  if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
    return;
  IntVT = TLI->getTypeToTransformTo(PN->getContext(), IntVT);
  unsigned BitWidth = IntVT.getSizeInBits();

  unsigned DestReg = ValueMap[PN];
  if (!TargetRegisterInfo::isVirtualRegister(DestReg))
    return;
  LiveOutRegInfo.grow(DestReg);
  LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];

  Value *V = PN->getIncomingValue(0);
  if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
    DestLOI.NumSignBits = 1;
    APInt Zero(BitWidth, 0);
    DestLOI.KnownZero = Zero;
    DestLOI.KnownOne = Zero;
    return;
  }

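  // A constant incoming value is fully known: every bit is either known-one
  // or known-zero, and the sign-bit count comes straight from the constant.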
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    APInt Val = CI->getValue().zextOrTrunc(BitWidth);
    DestLOI.NumSignBits = Val.getNumSignBits();
    DestLOI.KnownZero = ~Val;
    DestLOI.KnownOne = Val;
  } else {
    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    unsigned SrcReg = ValueMap[V];
    if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI = *SrcLOI;
  }

  assert(DestLOI.KnownZero.getBitWidth() == BitWidth &&
         DestLOI.KnownOne.getBitWidth() == BitWidth &&
         "Masks should have the same bit width as the type.");

  for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
      DestLOI.NumSignBits = 1;
      APInt Zero(BitWidth, 0);
      DestLOI.KnownZero = Zero;
      DestLOI.KnownOne = Zero;
      return;
    }

    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      APInt Val = CI->getValue().zextOrTrunc(BitWidth);
      DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
      DestLOI.KnownZero &= ~Val;
      DestLOI.KnownOne &= Val;
      continue;
    }

    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    unsigned SrcReg = ValueMap[V];
    if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
    DestLOI.KnownZero &= SrcLOI->KnownZero;
    DestLOI.KnownOne &= SrcLOI->KnownOne;
  }
}

/// setArgumentFrameIndex - Record frame index for the byval
/// argument. This overrides any previous frame index entry for this
/// argument.
void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
                                                 int FI) {
  ByValArgFrameIndexMap[A] = FI;
}

/// getArgumentFrameIndex - Get frame index for the byval argument.
/// If the argument does not have any assigned frame index then 0 is
/// returned.
int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
  DenseMap<const Argument *, int>::iterator I =
    ByValArgFrameIndexMap.find(A);
  if (I != ByValArgFrameIndexMap.end())
    return I->second;
  DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
  return 0;
}

/// ComputeUsesVAFloatArgument - Determine if any floating-point values are
/// being passed to this variadic function, and set the MachineModuleInfo's
/// usesVAFloatArgument flag if so. This flag is used to emit an undefined
/// reference to _fltused on Windows, which will link in MSVCRT's
/// floating-point support.
void llvm::ComputeUsesVAFloatArgument(const CallInst &I,
                                      MachineModuleInfo *MMI)
{
  FunctionType *FT = cast<FunctionType>(
    I.getCalledValue()->getType()->getContainedType(0));
  if (FT->isVarArg() && !MMI->usesVAFloatArgument()) {
    for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
      Type* T = I.getArgOperand(i)->getType();
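      // Walk the type graph of the argument (struct members, array and vector
      // element types, etc.) so that a floating-point value buried inside an
      // aggregate is detected as well as a bare float/double argument.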
      for (po_iterator<Type*> i = po_begin(T), e = po_end(T);
           i != e; ++i) {
        if (i->isFloatingPointTy()) {
          MMI->setUsesVAFloatArgument(true);
          return;
        }
      }
    }
  }
}

/// AddCatchInfo - Extract the personality and type infos from an eh.selector
/// call, and add them to the specified machine basic block.
void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
                        MachineBasicBlock *MBB) {
  // Inform the MachineModuleInfo of the personality for this landing pad.
  const ConstantExpr *CE = cast<ConstantExpr>(I.getArgOperand(1));
  assert(CE->getOpcode() == Instruction::BitCast &&
         isa<Function>(CE->getOperand(0)) &&
         "Personality should be a function");
  MMI->addPersonality(MBB, cast<Function>(CE->getOperand(0)));

  // Gather all the type infos for this landing pad and pass them along to
  // MachineModuleInfo.
  std::vector<const GlobalVariable *> TyInfo;
  unsigned N = I.getNumArgOperands();

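  // Scan the selector operands backwards. In this encoding, a ConstantInt
  // operand marks either a cleanup (value 0) or a filter whose value is one
  // more than the number of filter type infos that follow it; any operands
  // between the end of such an entry and the previously processed position
  // are catch type infos.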
  for (unsigned i = N - 1; i > 1; --i) {
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(i))) {
      unsigned FilterLength = CI->getZExtValue();
      unsigned FirstCatch = i + FilterLength + !FilterLength;
      assert(FirstCatch <= N && "Invalid filter length");

      if (FirstCatch < N) {
        TyInfo.reserve(N - FirstCatch);
        for (unsigned j = FirstCatch; j < N; ++j)
          TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
        MMI->addCatchTypeInfo(MBB, TyInfo);
        TyInfo.clear();
      }

      if (!FilterLength) {
        // Cleanup.
        MMI->addCleanup(MBB);
      } else {
        // Filter.
        TyInfo.reserve(FilterLength - 1);
        for (unsigned j = i + 1; j < FirstCatch; ++j)
          TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
        MMI->addFilterTypeInfo(MBB, TyInfo);
        TyInfo.clear();
      }

      N = i;
    }
  }

  if (N > 2) {
    TyInfo.reserve(N - 2);
    for (unsigned j = 2; j < N; ++j)
      TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
    MMI->addCatchTypeInfo(MBB, TyInfo);
  }
}

/// AddLandingPadInfo - Extract the exception handling information from the
/// landingpad instruction and add it to the specified machine module info.
void llvm::AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
                             MachineBasicBlock *MBB) {
  MMI.addPersonality(MBB,
                     cast<Function>(I.getPersonalityFn()->stripPointerCasts()));

  if (I.isCleanup())
    MMI.addCleanup(MBB);

  // FIXME: New EH - Add the clauses in reverse order. This isn't 100% correct,
  //        but we need to do it this way because of how the DWARF EH emitter
  //        processes the clauses.
  for (unsigned i = I.getNumClauses(); i != 0; --i) {
    Value *Val = I.getClause(i - 1);
    if (I.isCatch(i - 1)) {
      MMI.addCatchTypeInfo(MBB,
                           dyn_cast<GlobalVariable>(Val->stripPointerCasts()));
    } else {
      // Add filters in a list.
      Constant *CVal = cast<Constant>(Val);
      SmallVector<const GlobalVariable*, 4> FilterList;
      for (User::op_iterator
             II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II)
        FilterList.push_back(cast<GlobalVariable>((*II)->stripPointerCasts()));

      MMI.addFilterTypeInfo(MBB, FilterList);
    }
  }
}