// FastISel.cpp revision e0406af64fb0083eedff8f208b20d5e67d9c2879
1///===-- FastISel.cpp - Implementation of the FastISel class --------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the implementation of the FastISel class.
11//
12// "Fast" instruction selection is designed to emit very poor code quickly.
13// Also, it is not designed to be able to do much lowering, so most illegal
14// types (e.g. i64 on 32-bit targets) and operations are not supported.  It is
15// also not intended to be able to do much optimization, except in a few cases
16// where doing optimizations reduces overall compile time.  For example, folding
17// constants into immediate fields is often done, because it's cheap and it
18// reduces the number of instructions later phases have to examine.
19//
20// "Fast" instruction selection is able to fail gracefully and transfer
21// control to the SelectionDAG selector for operations that it doesn't
22// support.  In many cases, this allows us to avoid duplicating a lot of
23// the complicated lowering logic that SelectionDAG currently has.
24//
25// The intended use for "fast" instruction selection is "-O0" mode
26// compilation, where the quality of the generated code is irrelevant when
27// weighed against the speed at which the code can be generated.  Also,
28// at -O0, the LLVM optimizers are not running, and this makes the
29// compile time of codegen a much higher portion of the overall compile
30// time.  Despite its limitations, "fast" instruction selection is able to
31// handle enough code on its own to provide noticeable overall speedups
32// in -O0 compiles.
33//
34// Basic operations are supported in a target-independent way, by reading
35// the same instruction descriptions that the SelectionDAG selector reads,
36// and identifying simple arithmetic operations that can be directly selected
37// from simple operators.  More complicated operations currently require
38// target-specific code.
39//
40//===----------------------------------------------------------------------===//
41
42#include "llvm/Function.h"
43#include "llvm/GlobalVariable.h"
44#include "llvm/Instructions.h"
45#include "llvm/IntrinsicInst.h"
46#include "llvm/CodeGen/FastISel.h"
47#include "llvm/CodeGen/MachineInstrBuilder.h"
48#include "llvm/CodeGen/MachineModuleInfo.h"
49#include "llvm/CodeGen/MachineRegisterInfo.h"
50#include "llvm/CodeGen/DebugLoc.h"
51#include "llvm/CodeGen/DwarfWriter.h"
52#include "llvm/Analysis/DebugInfo.h"
53#include "llvm/Target/TargetData.h"
54#include "llvm/Target/TargetInstrInfo.h"
55#include "llvm/Target/TargetLowering.h"
56#include "llvm/Target/TargetMachine.h"
57#include "SelectionDAGBuild.h"
58using namespace llvm;
59
60unsigned FastISel::getRegForValue(Value *V) {
61  MVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
62  // Don't handle non-simple values in FastISel.
63  if (!RealVT.isSimple())
64    return 0;
65
66  // Ignore illegal types. We must do this before looking up the value
67  // in ValueMap because Arguments are given virtual registers regardless
68  // of whether FastISel can handle them.
69  MVT::SimpleValueType VT = RealVT.getSimpleVT();
70  if (!TLI.isTypeLegal(VT)) {
71    // Promote MVT::i1 to a legal type though, because it's common and easy.
72    if (VT == MVT::i1)
73      VT = TLI.getTypeToTransformTo(VT).getSimpleVT();
74    else
75      return 0;
76  }
77
78  // Look up the value to see if we already have a register for it. We
79  // cache values defined by Instructions across blocks, and other values
80  // only locally. This is because Instructions already have the SSA
81  // def-dominatess-use requirement enforced.
82  if (ValueMap.count(V))
83    return ValueMap[V];
84  unsigned Reg = LocalValueMap[V];
85  if (Reg != 0)
86    return Reg;
87
88  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
89    if (CI->getValue().getActiveBits() <= 64)
90      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
91  } else if (isa<AllocaInst>(V)) {
92    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
93  } else if (isa<ConstantPointerNull>(V)) {
94    // Translate this as an integer zero so that it can be
95    // local-CSE'd with actual integer zeros.
96    Reg = getRegForValue(Constant::getNullValue(TD.getIntPtrType()));
97  } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
98    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
99
100    if (!Reg) {
101      const APFloat &Flt = CF->getValueAPF();
102      MVT IntVT = TLI.getPointerTy();
103
104      uint64_t x[2];
105      uint32_t IntBitWidth = IntVT.getSizeInBits();
106      bool isExact;
107      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
108                                APFloat::rmTowardZero, &isExact);
109      if (isExact) {
110        APInt IntVal(IntBitWidth, 2, x);
111
112        unsigned IntegerReg = getRegForValue(ConstantInt::get(IntVal));
113        if (IntegerReg != 0)
114          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
115      }
116    }
117  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
118    if (!SelectOperator(CE, CE->getOpcode())) return 0;
119    Reg = LocalValueMap[CE];
120  } else if (isa<UndefValue>(V)) {
121    Reg = createResultReg(TLI.getRegClassFor(VT));
122    BuildMI(MBB, DL, TII.get(TargetInstrInfo::IMPLICIT_DEF), Reg);
123  }
124
125  // If target-independent code couldn't handle the value, give target-specific
126  // code a try.
127  if (!Reg && isa<Constant>(V))
128    Reg = TargetMaterializeConstant(cast<Constant>(V));
129
130  // Don't cache constant materializations in the general ValueMap.
131  // To do so would require tracking what uses they dominate.
132  if (Reg != 0)
133    LocalValueMap[V] = Reg;
134  return Reg;
135}
136
137unsigned FastISel::lookUpRegForValue(Value *V) {
138  // Look up the value to see if we already have a register for it. We
139  // cache values defined by Instructions across blocks, and other values
140  // only locally. This is because Instructions already have the SSA
141  // def-dominatess-use requirement enforced.
142  if (ValueMap.count(V))
143    return ValueMap[V];
144  return LocalValueMap[V];
145}
146
147/// UpdateValueMap - Update the value map to include the new mapping for this
148/// instruction, or insert an extra copy to get the result in a previous
149/// determined register.
150/// NOTE: This is only necessary because we might select a block that uses
151/// a value before we select the block that defines the value.  It might be
152/// possible to fix this by selecting blocks in reverse postorder.
153unsigned FastISel::UpdateValueMap(Value* I, unsigned Reg) {
154  if (!isa<Instruction>(I)) {
155    LocalValueMap[I] = Reg;
156    return Reg;
157  }
158
159  unsigned &AssignedReg = ValueMap[I];
160  if (AssignedReg == 0)
161    AssignedReg = Reg;
162  else if (Reg != AssignedReg) {
163    const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
164    TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
165                     Reg, RegClass, RegClass);
166  }
167  return AssignedReg;
168}
169
170unsigned FastISel::getRegForGEPIndex(Value *Idx) {
171  unsigned IdxN = getRegForValue(Idx);
172  if (IdxN == 0)
173    // Unhandled operand. Halt "fast" selection and bail.
174    return 0;
175
176  // If the index is smaller or larger than intptr_t, truncate or extend it.
177  MVT PtrVT = TLI.getPointerTy();
178  MVT IdxVT = MVT::getMVT(Idx->getType(), /*HandleUnknown=*/false);
179  if (IdxVT.bitsLT(PtrVT))
180    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT.getSimpleVT(),
181                      ISD::SIGN_EXTEND, IdxN);
182  else if (IdxVT.bitsGT(PtrVT))
183    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT.getSimpleVT(),
184                      ISD::TRUNCATE, IdxN);
185  return IdxN;
186}
187
188/// SelectBinaryOp - Select and emit code for a binary operator instruction,
189/// which has an opcode which directly corresponds to the given ISD opcode.
190///
191bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
192  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
193  if (VT == MVT::Other || !VT.isSimple())
194    // Unhandled type. Halt "fast" selection and bail.
195    return false;
196
197  // We only handle legal types. For example, on x86-32 the instruction
198  // selector contains all of the 64-bit instructions from x86-64,
199  // under the assumption that i64 won't be used if the target doesn't
200  // support it.
201  if (!TLI.isTypeLegal(VT)) {
202    // MVT::i1 is special. Allow AND, OR, or XOR because they
203    // don't require additional zeroing, which makes them easy.
204    if (VT == MVT::i1 &&
205        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
206         ISDOpcode == ISD::XOR))
207      VT = TLI.getTypeToTransformTo(VT);
208    else
209      return false;
210  }
211
212  unsigned Op0 = getRegForValue(I->getOperand(0));
213  if (Op0 == 0)
214    // Unhandled operand. Halt "fast" selection and bail.
215    return false;
216
217  // Check if the second operand is a constant and handle it appropriately.
218  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
219    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
220                                     ISDOpcode, Op0, CI->getZExtValue());
221    if (ResultReg != 0) {
222      // We successfully emitted code for the given LLVM Instruction.
223      UpdateValueMap(I, ResultReg);
224      return true;
225    }
226  }
227
228  // Check if the second operand is a constant float.
229  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
230    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
231                                     ISDOpcode, Op0, CF);
232    if (ResultReg != 0) {
233      // We successfully emitted code for the given LLVM Instruction.
234      UpdateValueMap(I, ResultReg);
235      return true;
236    }
237  }
238
239  unsigned Op1 = getRegForValue(I->getOperand(1));
240  if (Op1 == 0)
241    // Unhandled operand. Halt "fast" selection and bail.
242    return false;
243
244  // Now we have both operands in registers. Emit the instruction.
245  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
246                                   ISDOpcode, Op0, Op1);
247  if (ResultReg == 0)
248    // Target-specific code wasn't able to find a machine opcode for
249    // the given ISD opcode and type. Halt "fast" selection and bail.
250    return false;
251
252  // We successfully emitted code for the given LLVM Instruction.
253  UpdateValueMap(I, ResultReg);
254  return true;
255}
256
257bool FastISel::SelectGetElementPtr(User *I) {
258  unsigned N = getRegForValue(I->getOperand(0));
259  if (N == 0)
260    // Unhandled operand. Halt "fast" selection and bail.
261    return false;
262
263  const Type *Ty = I->getOperand(0)->getType();
264  MVT::SimpleValueType VT = TLI.getPointerTy().getSimpleVT();
265  for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
266       OI != E; ++OI) {
267    Value *Idx = *OI;
268    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
269      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
270      if (Field) {
271        // N = N + Offset
272        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
273        // FIXME: This can be optimized by combining the add with a
274        // subsequent one.
275        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
276        if (N == 0)
277          // Unhandled operand. Halt "fast" selection and bail.
278          return false;
279      }
280      Ty = StTy->getElementType(Field);
281    } else {
282      Ty = cast<SequentialType>(Ty)->getElementType();
283
284      // If this is a constant subscript, handle it quickly.
285      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
286        if (CI->getZExtValue() == 0) continue;
287        uint64_t Offs =
288          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
289        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
290        if (N == 0)
291          // Unhandled operand. Halt "fast" selection and bail.
292          return false;
293        continue;
294      }
295
296      // N = N + Idx * ElementSize;
297      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
298      unsigned IdxN = getRegForGEPIndex(Idx);
299      if (IdxN == 0)
300        // Unhandled operand. Halt "fast" selection and bail.
301        return false;
302
303      if (ElementSize != 1) {
304        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
305        if (IdxN == 0)
306          // Unhandled operand. Halt "fast" selection and bail.
307          return false;
308      }
309      N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
310      if (N == 0)
311        // Unhandled operand. Halt "fast" selection and bail.
312        return false;
313    }
314  }
315
316  // We successfully emitted code for the given LLVM Instruction.
317  UpdateValueMap(I, N);
318  return true;
319}
320
/// SelectCall - Select a call instruction. Only direct calls to intrinsics
/// are handled here (debug-info and EH intrinsics); any other call returns
/// false so selection falls back to SelectionDAG.
bool FastISel::SelectCall(User *I) {
  Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;  // Indirect calls are not handled.

  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_stoppoint: {
    // Record the line/column of this stop point as the current debug
    // location; no machine instruction is emitted.
    DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);
    if (DIDescriptor::ValidDebugInfo(SPI->getContext(), CodeGenOpt::None)) {
      DICompileUnit CU(cast<GlobalVariable>(SPI->getContext()));
      unsigned Line = SPI->getLine();
      unsigned Col = SPI->getColumn();
      unsigned Idx = MF.getOrCreateDebugLocID(CU.getGV(), Line, Col);
      setCurDebugLoc(DebugLoc::get(Idx));
    }
    return true;
  }
  case Intrinsic::dbg_region_start: {
    // Emit a DBG_LABEL marking the start of a lexical region.
    DbgRegionStartInst *RSI = cast<DbgRegionStartInst>(I);
    if (DIDescriptor::ValidDebugInfo(RSI->getContext(), CodeGenOpt::None) &&
        DW && DW->ShouldEmitDwarfDebug()) {
      unsigned ID =
        DW->RecordRegionStart(cast<GlobalVariable>(RSI->getContext()));
      const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
      BuildMI(MBB, DL, II).addImm(ID);
    }
    return true;
  }
  case Intrinsic::dbg_region_end: {
    // Emit a DBG_LABEL marking the end of a lexical region, handling the
    // end of an inlined function specially.
    DbgRegionEndInst *REI = cast<DbgRegionEndInst>(I);
    if (DIDescriptor::ValidDebugInfo(REI->getContext(), CodeGenOpt::None) &&
        DW && DW->ShouldEmitDwarfDebug()) {
     unsigned ID = 0;
     DISubprogram Subprogram(cast<GlobalVariable>(REI->getContext()));
     if (!Subprogram.isNull() && !Subprogram.describes(MF.getFunction())) {
        // This is end of an inlined function.
        const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
        ID = DW->RecordInlinedFnEnd(Subprogram);
        if (ID)
          // Returned ID is 0 if this is unbalanced "end of inlined
          // scope". This could happen if optimizer eats dbg intrinsics
          // or "beginning of inlined scope" is not recognized due to
          // missing location info. In such cases, ignore this region.end.
          BuildMI(MBB, DL, II).addImm(ID);
      } else {
        const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
        ID =  DW->RecordRegionEnd(cast<GlobalVariable>(REI->getContext()));
        BuildMI(MBB, DL, II).addImm(ID);
      }
    }
    return true;
  }
  case Intrinsic::dbg_func_start: {
    DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
    Value *SP = FSI->getSubprogram();
    if (!DIDescriptor::ValidDebugInfo(SP, CodeGenOpt::None))
      return true;

    // llvm.dbg.func.start implicitly defines a dbg_stoppoint which is what
    // (most?) gdb expects.
    DebugLoc PrevLoc = DL;
    DISubprogram Subprogram(cast<GlobalVariable>(SP));
    DICompileUnit CompileUnit = Subprogram.getCompileUnit();

    if (!Subprogram.describes(MF.getFunction())) {
      // This is a beginning of an inlined function.

      // If llvm.dbg.func.start is seen in a new block before any
      // llvm.dbg.stoppoint intrinsic then the location info is unknown.
      // FIXME : Why DebugLoc is reset at the beginning of each block ?
      if (PrevLoc.isUnknown())
        return true;
      // Record the source line.
      unsigned Line = Subprogram.getLineNumber();
      setCurDebugLoc(DebugLoc::get(MF.getOrCreateDebugLocID(
                                              CompileUnit.getGV(), Line, 0)));

      if (DW && DW->ShouldEmitDwarfDebug()) {
        // Label the inlined-function start with the call site's location.
        DebugLocTuple PrevLocTpl = MF.getDebugLocTuple(PrevLoc);
        unsigned LabelID = DW->RecordInlinedFnStart(Subprogram,
                                          DICompileUnit(PrevLocTpl.CompileUnit),
                                          PrevLocTpl.Line,
                                          PrevLocTpl.Col);
        const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
        BuildMI(MBB, DL, II).addImm(LabelID);
      }
    } else {
      // Record the source line.
      unsigned Line = Subprogram.getLineNumber();
      MF.setDefaultDebugLoc(DebugLoc::get(MF.getOrCreateDebugLocID(
                                              CompileUnit.getGV(), Line, 0)));
      if (DW && DW->ShouldEmitDwarfDebug()) {
        // llvm.dbg.func_start also defines beginning of function scope.
        DW->RecordRegionStart(cast<GlobalVariable>(FSI->getSubprogram()));
      }
    }

    return true;
  }
  case Intrinsic::dbg_declare: {
    // Attach debug info to a stack slot for a declared local variable.
    DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    Value *Variable = DI->getVariable();
    if (DIDescriptor::ValidDebugInfo(Variable, CodeGenOpt::None) &&
        DW && DW->ShouldEmitDwarfDebug()) {
      // Determine the address of the declared object.
      Value *Address = DI->getAddress();
      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
        Address = BCI->getOperand(0);
      AllocaInst *AI = dyn_cast<AllocaInst>(Address);
      // Don't handle byval struct arguments or VLAs, for example.
      if (!AI) break;
      DenseMap<const AllocaInst*, int>::iterator SI =
        StaticAllocaMap.find(AI);
      if (SI == StaticAllocaMap.end()) break; // VLAs.
      int FI = SI->second;

      // Determine the debug globalvariable.
      GlobalValue *GV = cast<GlobalVariable>(Variable);

      // Build the DECLARE instruction.
      const TargetInstrDesc &II = TII.get(TargetInstrInfo::DECLARE);
      MachineInstr *DeclareMI
        = BuildMI(MBB, DL, II).addFrameIndex(FI).addGlobalAddress(GV);
      DIVariable DV(cast<GlobalVariable>(GV));
      if (!DV.isNull()) {
        // This is a local variable
        DW->RecordVariableScope(DV, DeclareMI);
      }
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    // Copy the exception address out of the register the target reserves
    // for it in landing pads.
    MVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                           Reg, RC, RC);
      assert(InsertedCopy && "Can't copy address registers!");
      // Self-assignment quiets the unused-variable warning in NDEBUG
      // builds, where the assert above compiles away.
      InsertedCopy = InsertedCopy;
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector_i32:
  case Intrinsic::eh_selector_i64: {
    MVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      // NOTE: this inner VT deliberately shadows the outer one; the
      // selector width is fixed by which intrinsic variant was called.
      MVT VT = (IID == Intrinsic::eh_selector_i32 ?
                           MVT::i32 : MVT::i64);

      if (MMI) {
        if (MBB->isLandingPad())
          AddCatchInfo(*cast<CallInst>(I), MMI, MBB);
        else {
#ifndef NDEBUG
          CatchInfoLost.insert(cast<CallInst>(I));
#endif
          // FIXME: Mark exception selector register as live in.  Hack for PR1508.
          unsigned Reg = TLI.getExceptionSelectorRegister();
          if (Reg) MBB->addLiveIn(Reg);
        }

        unsigned Reg = TLI.getExceptionSelectorRegister();
        const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
        unsigned ResultReg = createResultReg(RC);
        bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                             Reg, RC, RC);
        assert(InsertedCopy && "Can't copy address registers!");
        // Self-assignment quiets the unused-variable warning in NDEBUG
        // builds, where the assert above compiles away.
        InsertedCopy = InsertedCopy;
        UpdateValueMap(I, ResultReg);
      } else {
        // No machine module info: use a null selector value.
        unsigned ResultReg =
          getRegForValue(Constant::getNullValue(I->getType()));
        UpdateValueMap(I, ResultReg);
      }
      return true;
    }
    }
    break;
  }
  }
  return false;
}
514
515bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
516  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
517  MVT DstVT = TLI.getValueType(I->getType());
518
519  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
520      DstVT == MVT::Other || !DstVT.isSimple())
521    // Unhandled type. Halt "fast" selection and bail.
522    return false;
523
524  // Check if the destination type is legal. Or as a special case,
525  // it may be i1 if we're doing a truncate because that's
526  // easy and somewhat common.
527  if (!TLI.isTypeLegal(DstVT))
528    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
529      // Unhandled type. Halt "fast" selection and bail.
530      return false;
531
532  // Check if the source operand is legal. Or as a special case,
533  // it may be i1 if we're doing zero-extension because that's
534  // easy and somewhat common.
535  if (!TLI.isTypeLegal(SrcVT))
536    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
537      // Unhandled type. Halt "fast" selection and bail.
538      return false;
539
540  unsigned InputReg = getRegForValue(I->getOperand(0));
541  if (!InputReg)
542    // Unhandled operand.  Halt "fast" selection and bail.
543    return false;
544
545  // If the operand is i1, arrange for the high bits in the register to be zero.
546  if (SrcVT == MVT::i1) {
547   SrcVT = TLI.getTypeToTransformTo(SrcVT);
548   InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg);
549   if (!InputReg)
550     return false;
551  }
552  // If the result is i1, truncate to the target's type for i1 first.
553  if (DstVT == MVT::i1)
554    DstVT = TLI.getTypeToTransformTo(DstVT);
555
556  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
557                                  DstVT.getSimpleVT(),
558                                  Opcode,
559                                  InputReg);
560  if (!ResultReg)
561    return false;
562
563  UpdateValueMap(I, ResultReg);
564  return true;
565}
566
567bool FastISel::SelectBitCast(User *I) {
568  // If the bitcast doesn't change the type, just use the operand value.
569  if (I->getType() == I->getOperand(0)->getType()) {
570    unsigned Reg = getRegForValue(I->getOperand(0));
571    if (Reg == 0)
572      return false;
573    UpdateValueMap(I, Reg);
574    return true;
575  }
576
577  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
578  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
579  MVT DstVT = TLI.getValueType(I->getType());
580
581  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
582      DstVT == MVT::Other || !DstVT.isSimple() ||
583      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
584    // Unhandled type. Halt "fast" selection and bail.
585    return false;
586
587  unsigned Op0 = getRegForValue(I->getOperand(0));
588  if (Op0 == 0)
589    // Unhandled operand. Halt "fast" selection and bail.
590    return false;
591
592  // First, try to perform the bitcast by inserting a reg-reg copy.
593  unsigned ResultReg = 0;
594  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
595    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
596    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
597    ResultReg = createResultReg(DstClass);
598
599    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
600                                         Op0, DstClass, SrcClass);
601    if (!InsertedCopy)
602      ResultReg = 0;
603  }
604
605  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
606  if (!ResultReg)
607    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
608                           ISD::BIT_CONVERT, Op0);
609
610  if (!ResultReg)
611    return false;
612
613  UpdateValueMap(I, ResultReg);
614  return true;
615}
616
/// SelectInstruction - Do "fast" instruction selection for the given
/// instruction by dispatching on its opcode via SelectOperator.
bool
FastISel::SelectInstruction(Instruction *I) {
  return SelectOperator(I, I->getOpcode());
}
621
622/// FastEmitBranch - Emit an unconditional branch to the given block,
623/// unless it is the immediate (fall-through) successor, and update
624/// the CFG.
625void
626FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
627  MachineFunction::iterator NextMBB =
628     next(MachineFunction::iterator(MBB));
629
630  if (MBB->isLayoutSuccessor(MSucc)) {
631    // The unconditional fall-through case, which needs no instructions.
632  } else {
633    // The unconditional branch case.
634    TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
635  }
636  MBB->addSuccessor(MSucc);
637}
638
/// SelectOperator - Dispatch on the given opcode and select code for the
/// operator, which may be an Instruction or a ConstantExpr. Returns false
/// for anything unhandled so the caller can fall back to SelectionDAG.
bool
FastISel::SelectOperator(User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
      FastEmitBranch(MSucc);
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::PHI:
    // PHI nodes are already emitted.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    // Pointer<->integer casts are extensions, truncations, or no-ops
    // depending on the relative bit widths.
    MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    MVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    // Same width: just reuse the operand's register.
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}
749
/// FastISel constructor - Capture references to the per-function selection
/// state (value maps, static-alloca map) and the target hooks taken from
/// the machine function's target. MBB starts out null; the current block
/// is established elsewhere before instructions are selected.
FastISel::FastISel(MachineFunction &mf,
                   MachineModuleInfo *mmi,
                   DwarfWriter *dw,
                   DenseMap<const Value *, unsigned> &vm,
                   DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                   DenseMap<const AllocaInst *, int> &am
#ifndef NDEBUG
                   , SmallSet<Instruction*, 8> &cil
#endif
                   )
  : MBB(0),
    ValueMap(vm),
    MBBMap(bm),
    StaticAllocaMap(am),
#ifndef NDEBUG
    CatchInfoLost(cil),
#endif
    MF(mf),
    MMI(mmi),
    DW(dw),
    MRI(MF.getRegInfo()),
    MFI(*MF.getFrameInfo()),
    MCP(*MF.getConstantPool()),
    TM(MF.getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()) {
}
778
// Out-of-line destructor; FastISel holds only references and POD state,
// so there is nothing to release here.
FastISel::~FastISel() {}
780
// Default target hook for the no-operand form: returning 0 signals that
// the target provides no fast-isel support for this opcode/type.
unsigned FastISel::FastEmit_(MVT::SimpleValueType, MVT::SimpleValueType,
                             ISD::NodeType) {
  return 0;
}
785
// Default target hook for the single-register form; 0 means unsupported.
unsigned FastISel::FastEmit_r(MVT::SimpleValueType, MVT::SimpleValueType,
                              ISD::NodeType, unsigned /*Op0*/) {
  return 0;
}
790
// Default target hook for the register-register form; 0 means unsupported.
unsigned FastISel::FastEmit_rr(MVT::SimpleValueType, MVT::SimpleValueType,
                               ISD::NodeType, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}
796
// Default target hook for the immediate form; 0 means unsupported.
unsigned FastISel::FastEmit_i(MVT::SimpleValueType, MVT::SimpleValueType,
                              ISD::NodeType, uint64_t /*Imm*/) {
  return 0;
}
801
// Default target hook for the FP-immediate form; 0 means unsupported.
unsigned FastISel::FastEmit_f(MVT::SimpleValueType, MVT::SimpleValueType,
                              ISD::NodeType, ConstantFP * /*FPImm*/) {
  return 0;
}
806
// Default target hook for the register-immediate form; 0 means unsupported.
unsigned FastISel::FastEmit_ri(MVT::SimpleValueType, MVT::SimpleValueType,
                               ISD::NodeType, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}
812
// Default target hook for the register/FP-immediate form; 0 means
// unsupported.
unsigned FastISel::FastEmit_rf(MVT::SimpleValueType, MVT::SimpleValueType,
                               ISD::NodeType, unsigned /*Op0*/,
                               ConstantFP * /*FPImm*/) {
  return 0;
}
818
// Default target hook for the register-register-immediate form; 0 means
// unsupported.
unsigned FastISel::FastEmit_rri(MVT::SimpleValueType, MVT::SimpleValueType,
                                ISD::NodeType,
                                unsigned /*Op0*/, unsigned /*Op1*/,
                                uint64_t /*Imm*/) {
  return 0;
}
825
826/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
827/// to emit an instruction with an immediate operand using FastEmit_ri.
828/// If that fails, it materializes the immediate into a register and try
829/// FastEmit_rr instead.
830unsigned FastISel::FastEmit_ri_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
831                                unsigned Op0, uint64_t Imm,
832                                MVT::SimpleValueType ImmType) {
833  // First check if immediate type is legal. If not, we can't use the ri form.
834  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
835  if (ResultReg != 0)
836    return ResultReg;
837  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
838  if (MaterialReg == 0)
839    return 0;
840  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
841}
842
/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
                                unsigned Op0, ConstantFP *FPImm,
                                MVT::SimpleValueType ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    // Use the pointer-sized integer type as the intermediate integer type.
    MVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    // The conversion must be exact, otherwise converting the integer back
    // with SINT_TO_FP below would not reproduce FPImm; give up in that case.
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                             APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    // Emit the integer constant, then convert it back to floating point.
    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}
887
// Allocate a fresh virtual register of the given register class to hold an
// instruction's result.
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}
891
892unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
893                                 const TargetRegisterClass* RC) {
894  unsigned ResultReg = createResultReg(RC);
895  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
896
897  BuildMI(MBB, DL, II, ResultReg);
898  return ResultReg;
899}
900
901unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
902                                  const TargetRegisterClass *RC,
903                                  unsigned Op0) {
904  unsigned ResultReg = createResultReg(RC);
905  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
906
907  if (II.getNumDefs() >= 1)
908    BuildMI(MBB, DL, II, ResultReg).addReg(Op0);
909  else {
910    BuildMI(MBB, DL, II).addReg(Op0);
911    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
912                                         II.ImplicitDefs[0], RC, RC);
913    if (!InsertedCopy)
914      ResultReg = 0;
915  }
916
917  return ResultReg;
918}
919
920unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
921                                   const TargetRegisterClass *RC,
922                                   unsigned Op0, unsigned Op1) {
923  unsigned ResultReg = createResultReg(RC);
924  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
925
926  if (II.getNumDefs() >= 1)
927    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1);
928  else {
929    BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1);
930    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
931                                         II.ImplicitDefs[0], RC, RC);
932    if (!InsertedCopy)
933      ResultReg = 0;
934  }
935  return ResultReg;
936}
937
938unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
939                                   const TargetRegisterClass *RC,
940                                   unsigned Op0, uint64_t Imm) {
941  unsigned ResultReg = createResultReg(RC);
942  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
943
944  if (II.getNumDefs() >= 1)
945    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Imm);
946  else {
947    BuildMI(MBB, DL, II).addReg(Op0).addImm(Imm);
948    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
949                                         II.ImplicitDefs[0], RC, RC);
950    if (!InsertedCopy)
951      ResultReg = 0;
952  }
953  return ResultReg;
954}
955
956unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
957                                   const TargetRegisterClass *RC,
958                                   unsigned Op0, ConstantFP *FPImm) {
959  unsigned ResultReg = createResultReg(RC);
960  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
961
962  if (II.getNumDefs() >= 1)
963    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addFPImm(FPImm);
964  else {
965    BuildMI(MBB, DL, II).addReg(Op0).addFPImm(FPImm);
966    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
967                                         II.ImplicitDefs[0], RC, RC);
968    if (!InsertedCopy)
969      ResultReg = 0;
970  }
971  return ResultReg;
972}
973
974unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
975                                    const TargetRegisterClass *RC,
976                                    unsigned Op0, unsigned Op1, uint64_t Imm) {
977  unsigned ResultReg = createResultReg(RC);
978  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
979
980  if (II.getNumDefs() >= 1)
981    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
982  else {
983    BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1).addImm(Imm);
984    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
985                                         II.ImplicitDefs[0], RC, RC);
986    if (!InsertedCopy)
987      ResultReg = 0;
988  }
989  return ResultReg;
990}
991
992unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
993                                  const TargetRegisterClass *RC,
994                                  uint64_t Imm) {
995  unsigned ResultReg = createResultReg(RC);
996  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
997
998  if (II.getNumDefs() >= 1)
999    BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
1000  else {
1001    BuildMI(MBB, DL, II).addImm(Imm);
1002    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
1003                                         II.ImplicitDefs[0], RC, RC);
1004    if (!InsertedCopy)
1005      ResultReg = 0;
1006  }
1007  return ResultReg;
1008}
1009
1010unsigned FastISel::FastEmitInst_extractsubreg(MVT::SimpleValueType RetVT,
1011                                              unsigned Op0, uint32_t Idx) {
1012  const TargetRegisterClass* RC = MRI.getRegClass(Op0);
1013
1014  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
1015  const TargetInstrDesc &II = TII.get(TargetInstrInfo::EXTRACT_SUBREG);
1016
1017  if (II.getNumDefs() >= 1)
1018    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Idx);
1019  else {
1020    BuildMI(MBB, DL, II).addReg(Op0).addImm(Idx);
1021    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
1022                                         II.ImplicitDefs[0], RC, RC);
1023    if (!InsertedCopy)
1024      ResultReg = 0;
1025  }
1026  return ResultReg;
1027}
1028
/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT::SimpleValueType VT, unsigned Op) {
  // AND with 1 masks off everything above bit 0; delegated through
  // FastEmit_ri so the target can fold the immediate (0 on failure).
  return FastEmit_ri(VT, VT, ISD::AND, Op, 1);
}
1034