FastISel.cpp revision 5ed17ae92a9239c2ff7d3ba494bf96651598ee7a
//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported.  It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time.  For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support.  In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated.  Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time.  Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators.  More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "SelectionDAGBuilder.h"
#include "FunctionLoweringInfo.h"
using namespace llvm;

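/// getRegForValue - Create or reuse a virtual register holding the given
/// value, materializing constants, allocas, and undef values as needed.
/// Returns 0 if the value cannot be handled by "fast" instruction selection.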
unsigned FastISel::getRegForValue(Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  if (ValueMap.count(V))
    return ValueMap[V];
  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
      }
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (!SelectOperator(CE, CE->getOpcode())) return 0;
    Reg = LocalValueMap[CE];
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(MBB, DL, TII.get(TargetInstrInfo::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0)
    LocalValueMap[V] = Reg;
  return Reg;
}

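/// lookUpRegForValue - Look up the register already assigned to the given
/// value, without attempting to materialize it.  Returns 0 if the value has
/// not been assigned a register yet.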
unsigned FastISel::lookUpRegForValue(Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  if (ValueMap.count(V))
    return ValueMap[V];
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value.  It might be
/// possible to fix this by selecting blocks in reverse postorder.
unsigned FastISel::UpdateValueMap(Value* I, unsigned Reg) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return Reg;
  }

  unsigned &AssignedReg = ValueMap[I];
  if (AssignedReg == 0)
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
    TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
                     Reg, RegClass, RegClass);
  }
  return AssignedReg;
}

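/// getRegForGEPIndex - Obtain a register for the given GEP index operand,
/// sign-extending or truncating it to the pointer width if necessary.
/// Returns 0 if the operand cannot be handled.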
unsigned FastISel::getRegForGEPIndex(Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return 0;

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT))
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  else if (IdxVT.bitsGT(PtrVT))
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  return IdxN;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction
/// whose opcode directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CI->getZExtValue());
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

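/// SelectGetElementPtr - Select and emit code for a getelementptr
/// instruction by walking its indices, folding constant offsets and emitting
/// adds and multiplies for the variable ones.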
bool FastISel::SelectGetElementPtr(User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  const Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getZExtValue() == 0) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      unsigned IdxN = getRegForGEPIndex(Idx);
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

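/// SelectCall - Select and emit code for a call instruction.  Only a few
/// intrinsics (dbg.declare, eh.exception, eh.selector) are handled here;
/// everything else is left to the target hook or the SelectionDAG path.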
bool FastISel::SelectCall(User *I) {
  Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;

  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_declare: {
    DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    if (!DIDescriptor::ValidDebugInfo(DI->getVariable(), CodeGenOpt::None) ||
        !DW || !DW->ShouldEmitDwarfDebug())
      return true;

    Value *Address = DI->getAddress();
    AllocaInst *AI = dyn_cast<AllocaInst>(Address);
    // Don't handle byval struct arguments or VLAs, for example.
    if (!AI) break;
    DenseMap<const AllocaInst*, int>::iterator SI =
      StaticAllocaMap.find(AI);
    if (SI == StaticAllocaMap.end()) break; // VLAs.
    int FI = SI->second;
    if (MMI) {
      if (MDNode *Dbg = DI->getMetadata("dbg"))
        MMI->setVariableDbgInfo(DI->getVariable(), FI, Dbg);
    }
    // Building the map above is target independent.  Generating DEBUG_VALUE
    // inline is target dependent; do this now.
    (void)TargetSelectInstruction(cast<Instruction>(I));
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                           Reg, RC, RC);
      assert(InsertedCopy && "Can't copy address registers!");
      InsertedCopy = InsertedCopy;
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      if (MMI) {
        if (MBB->isLandingPad())
          AddCatchInfo(*cast<CallInst>(I), MMI, MBB);
        else {
#ifndef NDEBUG
          CatchInfoLost.insert(cast<CallInst>(I));
#endif
          // FIXME: Mark exception selector register as live in.  Hack for PR1508.
          unsigned Reg = TLI.getExceptionSelectorRegister();
          if (Reg) MBB->addLiveIn(Reg);
        }

        unsigned Reg = TLI.getExceptionSelectorRegister();
        EVT SrcVT = TLI.getPointerTy();
        const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
        unsigned ResultReg = createResultReg(RC);
        bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
                                             RC, RC);
        assert(InsertedCopy && "Can't copy address registers!");
        InsertedCopy = InsertedCopy;

        // Cast the register to the type of the selector.
        if (SrcVT.bitsGT(MVT::i32))
          ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                                 ResultReg);
        else if (SrcVT.bitsLT(MVT::i32))
          ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                                 ISD::SIGN_EXTEND, ResultReg);
        if (ResultReg == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;

        UpdateValueMap(I, ResultReg);
      } else {
        unsigned ResultReg =
          getRegForValue(Constant::getNullValue(I->getType()));
        UpdateValueMap(I, ResultReg);
      }
      return true;
    }
    }
    break;
  }
  }
  return false;
}

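/// SelectCast - Select and emit code for a cast instruction, given the ISD
/// opcode it corresponds to (e.g. ISD::ZERO_EXTEND or ISD::TRUNCATE).
/// Returns false if the types involved cannot be handled.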
bool FastISel::SelectCast(User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand.  Halt "fast" selection and bail.
    return false;

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg);
    if (!InputReg)
      return false;
  }
  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

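/// SelectBitCast - Select and emit code for a bitcast.  When the source and
/// destination have the same simple value type this is attempted as a
/// reg-reg copy; otherwise (or if the copy fails) a BIT_CONVERT is emitted.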
bool FastISel::SelectBitCast(User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    ResultReg = createResultReg(DstClass);

    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         Op0, DstClass, SrcClass);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BIT_CONVERT, Op0);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

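/// SelectInstruction - Try to select the given instruction, first with the
/// target-independent code in SelectOperator and then with the target's
/// TargetSelectInstruction hook.  Returns false if neither succeeds.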
bool
FastISel::SelectInstruction(Instruction *I) {
  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode()))
    return true;

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I))
    return true;

  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
  if (MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
  }
  MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BIT_CONVERT, OpReg);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR, IntReg,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BIT_CONVERT, IntResultReg);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

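/// SelectOperator - Dispatch on the IR opcode and emit code for the operator
/// using the target-independent helpers above.  Returns false for any opcode
/// that "fast" selection does not handle.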
bool
FastISel::SelectOperator(User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
      FastEmitBranch(MSucc);
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::PHI:
    // PHI nodes are already emitted.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(MachineFunction &mf,
                   MachineModuleInfo *mmi,
                   DwarfWriter *dw,
                   DenseMap<const Value *, unsigned> &vm,
                   DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                   DenseMap<const AllocaInst *, int> &am
#ifndef NDEBUG
                   , SmallSet<Instruction*, 8> &cil
#endif
                   )
  : MBB(0),
    ValueMap(vm),
    MBBMap(bm),
    StaticAllocaMap(am),
#ifndef NDEBUG
    CatchInfoLost(cil),
#endif
    MF(mf),
    MMI(mmi),
    DW(dw),
    MRI(MF.getRegInfo()),
    MFI(*MF.getFrameInfo()),
    MCP(*MF.getConstantPool()),
    TM(MF.getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()) {
}

FastISel::~FastISel() {}

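// The FastEmit_* methods below are the target-independent defaults; each
// simply returns 0, meaning "not handled".  Targets that support "fast"
// instruction selection override the forms they can handle.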
unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned, unsigned /*Op0*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned, unsigned /*Op0*/,
                               ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, unsigned /*Op1*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, uint64_t Imm,
                                MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
                                unsigned Op0, ConstantFP *FPImm,
                                MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    EVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                             APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

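// The FastEmitInst_* helpers below build a MachineInstr for a given target
// opcode.  If the instruction description defines an explicit result, the
// result register is encoded directly; otherwise the instruction is emitted
// as-is and its first implicit def is copied into a new virtual register.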
unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(MBB, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0);
  else {
    BuildMI(MBB, DL, II).addReg(Op0);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, unsigned Op1) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Imm);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addFPImm(FPImm);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addFPImm(FPImm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, unsigned Op1, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(MBB, DL, II).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, uint32_t Idx) {
  const TargetRegisterClass* RC = MRI.getRegClass(Op0);

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  const TargetInstrDesc &II = TII.get(TargetInstrInfo::EXTRACT_SUBREG);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Idx);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addImm(Idx);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op) {
  return FastEmit_ri(VT, VT, ISD::AND, Op, 1);
}