FastISel.cpp revision d762908e06b093e67af95c89d8647a527e17e2c8
1//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the implementation of the FastISel class.
11//
12// "Fast" instruction selection is designed to emit very poor code quickly.
13// Also, it is not designed to be able to do much lowering, so most illegal
14// types (e.g. i64 on 32-bit targets) and operations (e.g. calls) are not
15// supported. It is also not intended to be able to do much optimization,
16// except in a few cases where doing optimizations reduces overall compile
17// time (e.g. folding constants into immediate fields, because it's cheap
18// and it reduces the number of instructions later phases have to examine).
19//
20// "Fast" instruction selection is able to fail gracefully and transfer
21// control to the SelectionDAG selector for operations that it doesn't
22// support. In many cases, this allows us to avoid duplicating a lot of
23// the complicated lowering logic that SelectionDAG currently has.
24//
25// The intended use for "fast" instruction selection is "-O0" mode
26// compilation, where the quality of the generated code is irrelevant when
27// weighed against the speed at which the code can be generated. Also,
28// at -O0, the LLVM optimizers are not running, and this makes the
29// compile time of codegen a much higher portion of the overall compile
30// time. Despite its limitations, "fast" instruction selection is able to
31// handle enough code on its own to provide noticeable overall speedups
32// in -O0 compiles.
33//
34// Basic operations are supported in a target-independent way, by reading
35// the same instruction descriptions that the SelectionDAG selector reads,
36// and identifying simple arithmetic operations that can be directly selected
37// from simple operators. More complicated operations currently require
38// target-specific code.
39//
40//===----------------------------------------------------------------------===//
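A minimal sketch of the target-side hook this header alludes to, written against the FastEmit_rr and FastEmitInst_rr signatures defined later in this file. The subclass name and the X86::ADD32rr / X86::GR32RegisterClass identifiers are illustrative assumptions, not code from this revision; real targets supply their own opcodes and register classes. A hook either emits a machine instruction and returns its result register, or returns 0 so fast selection falls back to the SelectionDAG path.

    // Hypothetical target override: select a 32-bit integer add.
    unsigned X86FastISelSketch::FastEmit_rr(MVT::SimpleValueType VT,
                                            MVT::SimpleValueType RetVT,
                                            ISD::NodeType Opcode,
                                            unsigned Op0, unsigned Op1) {
      if (VT == MVT::i32 && Opcode == ISD::ADD)
        return FastEmitInst_rr(X86::ADD32rr, X86::GR32RegisterClass, Op0, Op1);
      return 0; // Unhandled; the caller bails out gracefully.
    }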
41
42#include "llvm/Function.h"
43#include "llvm/GlobalVariable.h"
44#include "llvm/Instructions.h"
45#include "llvm/IntrinsicInst.h"
46#include "llvm/CodeGen/FastISel.h"
47#include "llvm/CodeGen/MachineInstrBuilder.h"
48#include "llvm/CodeGen/MachineModuleInfo.h"
49#include "llvm/CodeGen/MachineRegisterInfo.h"
50#include "llvm/Target/TargetData.h"
51#include "llvm/Target/TargetInstrInfo.h"
52#include "llvm/Target/TargetLowering.h"
53#include "llvm/Target/TargetMachine.h"
54using namespace llvm;
55
56unsigned FastISel::getRegForValue(Value *V) {
57  // Look up the value to see if we already have a register for it. We
58  // cache values defined by Instructions across blocks, and other values
59  // only locally. This is because Instructions already have the SSA
60  // def-dominates-use requirement enforced.
61  if (ValueMap.count(V))
62    return ValueMap[V];
63  unsigned Reg = LocalValueMap[V];
64  if (Reg != 0)
65    return Reg;
66
67  MVT::SimpleValueType VT = TLI.getValueType(V->getType()).getSimpleVT();
68
69  // Ignore illegal types.
70  if (!TLI.isTypeLegal(VT)) {
71    // Promote MVT::i1 to a legal type though, because it's common and easy.
72    if (VT == MVT::i1)
73      VT = TLI.getTypeToTransformTo(VT).getSimpleVT();
74    else
75      return 0;
76  }
77
78  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
79    if (CI->getValue().getActiveBits() <= 64)
80      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
81  } else if (isa<AllocaInst>(V)) {
82    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
83  } else if (isa<ConstantPointerNull>(V)) {
84    Reg = FastEmit_i(VT, VT, ISD::Constant, 0);
85  } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
86    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
87
88    if (!Reg) {
89      const APFloat &Flt = CF->getValueAPF();
90      MVT IntVT = TLI.getPointerTy();
91
92      uint64_t x[2];
93      uint32_t IntBitWidth = IntVT.getSizeInBits();
94      if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
95                               APFloat::rmTowardZero) == APFloat::opOK) {
96        APInt IntVal(IntBitWidth, 2, x);
97
98        unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
99                                         ISD::Constant, IntVal.getZExtValue());
100        if (IntegerReg != 0)
101          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
102      }
103    }
104  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
105    if (!SelectOperator(CE, CE->getOpcode())) return 0;
106    Reg = LocalValueMap[CE];
107  } else if (isa<UndefValue>(V)) {
108    Reg = createResultReg(TLI.getRegClassFor(VT));
109    BuildMI(MBB, TII.get(TargetInstrInfo::IMPLICIT_DEF), Reg);
110  }
111
112  // If target-independent code couldn't handle the value, give target-specific
113  // code a try.
114  if (!Reg && isa<Constant>(V))
115    Reg = TargetMaterializeConstant(cast<Constant>(V));
116
117  // Don't cache constant materializations in the general ValueMap.
118  // To do so would require tracking what uses they dominate.
119  if (Reg != 0)
120    LocalValueMap[V] = Reg;
121  return Reg;
122}
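A short worked example of the ConstantFP fallback above, assuming the target provides FastEmit_i and FastEmit_r but not FastEmit_f, a 32-bit pointer type for IntVT, and an f64 constant; the values are illustrative. For a ConstantFP of 42.0, convertToInteger with rmTowardZero is exact (opOK), so the path reduces to:

    unsigned IntReg = FastEmit_i(MVT::i32, MVT::i32, ISD::Constant, 42);
    unsigned FPReg  = FastEmit_r(MVT::i32, MVT::f64, ISD::SINT_TO_FP, IntReg);

A constant such as 0.5 converts inexactly, Reg stays 0, and TargetMaterializeConstant gets the final chance to handle it.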
123
124unsigned FastISel::lookUpRegForValue(Value *V) {
125  // Look up the value to see if we already have a register for it. We
126  // cache values defined by Instructions across blocks, and other values
127  // only locally. This is because Instructions already have the SSA
128  // def-dominates-use requirement enforced.
129  if (ValueMap.count(V))
130    return ValueMap[V];
131  return LocalValueMap[V];
132}
133
134/// UpdateValueMap - Update the value map to include the new mapping for this
135/// instruction, or insert an extra copy to get the result in a previously
136/// determined register.
137/// NOTE: This is only necessary because we might select a block that uses
138/// a value before we select the block that defines the value.  It might be
139/// possible to fix this by selecting blocks in reverse postorder.
140void FastISel::UpdateValueMap(Value* I, unsigned Reg) {
141  if (!isa<Instruction>(I)) {
142    LocalValueMap[I] = Reg;
143    return;
144  }
145  if (!ValueMap.count(I))
146    ValueMap[I] = Reg;
147  else
148    TII.copyRegToReg(*MBB, MBB->end(), ValueMap[I],
149                     Reg, MRI.getRegClass(Reg), MRI.getRegClass(Reg));
150}
151
152/// SelectBinaryOp - Select and emit code for a binary operator instruction,
153/// which has an opcode which directly corresponds to the given ISD opcode.
154///
155bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
156  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
157  if (VT == MVT::Other || !VT.isSimple())
158    // Unhandled type. Halt "fast" selection and bail.
159    return false;
160
161  // We only handle legal types. For example, on x86-32 the instruction
162  // selector contains all of the 64-bit instructions from x86-64,
163  // under the assumption that i64 won't be used if the target doesn't
164  // support it.
165  if (!TLI.isTypeLegal(VT)) {
166    // MVT::i1 is special. Allow AND, OR, or XOR because they
167    // don't require additional zeroing, which makes them easy.
168    if (VT == MVT::i1 &&
169        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
170         ISDOpcode == ISD::XOR))
171      VT = TLI.getTypeToTransformTo(VT);
172    else
173      return false;
174  }
175
176  unsigned Op0 = getRegForValue(I->getOperand(0));
177  if (Op0 == 0)
178    // Unhandled operand. Halt "fast" selection and bail.
179    return false;
180
181  // Check if the second operand is a constant and handle it appropriately.
182  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
183    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
184                                     ISDOpcode, Op0, CI->getZExtValue());
185    if (ResultReg != 0) {
186      // We successfully emitted code for the given LLVM Instruction.
187      UpdateValueMap(I, ResultReg);
188      return true;
189    }
190  }
191
192  // Check if the second operand is a constant float.
193  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
194    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
195                                     ISDOpcode, Op0, CF);
196    if (ResultReg != 0) {
197      // We successfully emitted code for the given LLVM Instruction.
198      UpdateValueMap(I, ResultReg);
199      return true;
200    }
201  }
202
203  unsigned Op1 = getRegForValue(I->getOperand(1));
204  if (Op1 == 0)
205    // Unhandled operand. Halt "fast" selection and bail.
206    return false;
207
208  // Now we have both operands in registers. Emit the instruction.
209  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
210                                   ISDOpcode, Op0, Op1);
211  if (ResultReg == 0)
212    // Target-specific code wasn't able to find a machine opcode for
213    // the given ISD opcode and type. Halt "fast" selection and bail.
214    return false;
215
216  // We successfully emitted code for the given LLVM Instruction.
217  UpdateValueMap(I, ResultReg);
218  return true;
219}
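To make the constant fast path concrete: for an IR instruction such as "%r = add i32 %a, 5" (an illustrative example, with i32 assumed legal, and A and Five standing for the two operand Values), the logic above tries a register+immediate form first and only materializes the constant if the target has none:

    unsigned Op0 = getRegForValue(A);                         // register for %a
    unsigned R   = FastEmit_ri(MVT::i32, MVT::i32, ISD::ADD, Op0, 5);
    if (R == 0) {                                             // no reg+imm add
      unsigned Op1 = getRegForValue(Five);                    // materialize 5
      R = FastEmit_rr(MVT::i32, MVT::i32, ISD::ADD, Op0, Op1);
    }
    UpdateValueMap(I, R);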
220
221bool FastISel::SelectGetElementPtr(User *I) {
222  unsigned N = getRegForValue(I->getOperand(0));
223  if (N == 0)
224    // Unhandled operand. Halt "fast" selection and bail.
225    return false;
226
227  const Type *Ty = I->getOperand(0)->getType();
228  MVT::SimpleValueType VT = TLI.getPointerTy().getSimpleVT();
229  for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
230       OI != E; ++OI) {
231    Value *Idx = *OI;
232    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
233      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
234      if (Field) {
235        // N = N + Offset
236        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
237        // FIXME: This can be optimized by combining the add with a
238        // subsequent one.
239        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
240        if (N == 0)
241          // Unhandled operand. Halt "fast" selection and bail.
242          return false;
243      }
244      Ty = StTy->getElementType(Field);
245    } else {
246      Ty = cast<SequentialType>(Ty)->getElementType();
247
248      // If this is a constant subscript, handle it quickly.
249      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
250        if (CI->getZExtValue() == 0) continue;
251        uint64_t Offs =
252          TD.getABITypeSize(Ty)*CI->getSExtValue();
253        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
254        if (N == 0)
255          // Unhandled operand. Halt "fast" selection and bail.
256          return false;
257        continue;
258      }
259
260      // N = N + Idx * ElementSize;
261      uint64_t ElementSize = TD.getABITypeSize(Ty);
262      unsigned IdxN = getRegForValue(Idx);
263      if (IdxN == 0)
264        // Unhandled operand. Halt "fast" selection and bail.
265        return false;
266
267      // If the index is smaller or larger than intptr_t, truncate or extend
268      // it.
269      MVT IdxVT = MVT::getMVT(Idx->getType(), /*HandleUnknown=*/false);
270      if (IdxVT.bitsLT(VT))
271        IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::SIGN_EXTEND, IdxN);
272      else if (IdxVT.bitsGT(VT))
273        IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::TRUNCATE, IdxN);
274      if (IdxN == 0)
275        // Unhandled operand. Halt "fast" selection and bail.
276        return false;
277
278      if (ElementSize != 1) {
279        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
280        if (IdxN == 0)
281          // Unhandled operand. Halt "fast" selection and bail.
282          return false;
283      }
284      N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
285      if (N == 0)
286        // Unhandled operand. Halt "fast" selection and bail.
287        return false;
288    }
289  }
290
291  // We successfully emitted code for the given LLVM Instruction.
292  UpdateValueMap(I, N);
293  return true;
294}
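A worked example of the address computation above, assuming 32-bit pointers and the illustrative type { i32, [4 x i16] }, so field 1 starts at offset 4 and the array element size is 2. For

    %p = getelementptr { i32, [4 x i16] }* %s, i32 0, i32 1, i32 %i

the loop skips the leading zero index and then emits, starting from N = getRegForValue(%s):

    N    = FastEmit_ri_(MVT::i32, ISD::ADD, N, 4, MVT::i32);      // field 1 offset
    IdxN = getRegForValue(Idx);                                    // register for %i
    IdxN = FastEmit_ri_(MVT::i32, ISD::MUL, IdxN, 2, MVT::i32);    // * sizeof(i16)
    N    = FastEmit_rr(MVT::i32, MVT::i32, ISD::ADD, N, IdxN);

Since %i is already i32 here, the sign-extend/truncate step is a no-op.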
295
296bool FastISel::SelectCall(User *I) {
297  Function *F = cast<CallInst>(I)->getCalledFunction();
298  if (!F) return false;
299
300  unsigned IID = F->getIntrinsicID();
301  switch (IID) {
302  default: break;
303  case Intrinsic::dbg_stoppoint: {
304    DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);
305    if (MMI && SPI->getContext() && MMI->Verify(SPI->getContext())) {
306      DebugInfoDesc *DD = MMI->getDescFor(SPI->getContext());
307      assert(DD && "Not a debug information descriptor");
308      const CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
309      unsigned SrcFile = MMI->RecordSource(CompileUnit);
310      unsigned Line = SPI->getLine();
311      unsigned Col = SPI->getColumn();
312      unsigned ID = MMI->RecordSourceLine(Line, Col, SrcFile);
313      const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
314      BuildMI(MBB, II).addImm(ID);
315    }
316    return true;
317  }
318  case Intrinsic::dbg_region_start: {
319    DbgRegionStartInst *RSI = cast<DbgRegionStartInst>(I);
320    if (MMI && RSI->getContext() && MMI->Verify(RSI->getContext())) {
321      unsigned ID = MMI->RecordRegionStart(RSI->getContext());
322      const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
323      BuildMI(MBB, II).addImm(ID);
324    }
325    return true;
326  }
327  case Intrinsic::dbg_region_end: {
328    DbgRegionEndInst *REI = cast<DbgRegionEndInst>(I);
329    if (MMI && REI->getContext() && MMI->Verify(REI->getContext())) {
330      unsigned ID = MMI->RecordRegionEnd(REI->getContext());
331      const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
332      BuildMI(MBB, II).addImm(ID);
333    }
334    return true;
335  }
336  case Intrinsic::dbg_func_start: {
337    if (!MMI) return true;
338    DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
339    Value *SP = FSI->getSubprogram();
340    if (SP && MMI->Verify(SP)) {
341      // llvm.dbg.func.start implicitly defines a dbg_stoppoint which is
342      // what (most?) gdb expects.
343      DebugInfoDesc *DD = MMI->getDescFor(SP);
344      assert(DD && "Not a debug information descriptor");
345      SubprogramDesc *Subprogram = cast<SubprogramDesc>(DD);
346      const CompileUnitDesc *CompileUnit = Subprogram->getFile();
347      unsigned SrcFile = MMI->RecordSource(CompileUnit);
348      // Record the source line but do not create a label. It will be emitted
349      // at asm emission time.
350      MMI->RecordSourceLine(Subprogram->getLine(), 0, SrcFile);
351    }
352    return true;
353  }
354  case Intrinsic::dbg_declare: {
355    DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
356    Value *Variable = DI->getVariable();
357    if (MMI && Variable && MMI->Verify(Variable)) {
358      // Determine the address of the declared object.
359      Value *Address = DI->getAddress();
360      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
361        Address = BCI->getOperand(0);
362      AllocaInst *AI = dyn_cast<AllocaInst>(Address);
363      // Don't handle byval struct arguments, for example.
364      if (!AI) break;
365      DenseMap<const AllocaInst*, int>::iterator SI =
366        StaticAllocaMap.find(AI);
367      assert(SI != StaticAllocaMap.end() && "Invalid dbg.declare!");
368      int FI = SI->second;
369
370      // Determine the debug global variable.
371      GlobalValue *GV = cast<GlobalVariable>(Variable);
372
373      // Build the DECLARE instruction.
374      const TargetInstrDesc &II = TII.get(TargetInstrInfo::DECLARE);
375      BuildMI(MBB, II).addFrameIndex(FI).addGlobalAddress(GV);
376    }
377    return true;
378  }
379  }
380  return false;
381}
382
383bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
384  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
385  MVT DstVT = TLI.getValueType(I->getType());
386
387  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
388      DstVT == MVT::Other || !DstVT.isSimple() ||
389      !TLI.isTypeLegal(DstVT))
390    // Unhandled type. Halt "fast" selection and bail.
391    return false;
392
393  // Check if the source operand is legal. Or as a special case,
394  // it may be i1 if we're doing zero-extension because that's
395  // trivially easy and somewhat common.
396  if (!TLI.isTypeLegal(SrcVT)) {
397    if (SrcVT == MVT::i1 && Opcode == ISD::ZERO_EXTEND)
398      SrcVT = TLI.getTypeToTransformTo(SrcVT);
399    else
400      // Unhandled type. Halt "fast" selection and bail.
401      return false;
402  }
403
404  unsigned InputReg = getRegForValue(I->getOperand(0));
405  if (!InputReg)
406    // Unhandled operand.  Halt "fast" selection and bail.
407    return false;
408
409  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
410                                  DstVT.getSimpleVT(),
411                                  Opcode,
412                                  InputReg);
413  if (!ResultReg)
414    return false;
415
416  UpdateValueMap(I, ResultReg);
417  return true;
418}
419
420bool FastISel::SelectBitCast(User *I) {
421  // If the bitcast doesn't change the type, just use the operand value.
422  if (I->getType() == I->getOperand(0)->getType()) {
423    unsigned Reg = getRegForValue(I->getOperand(0));
424    if (Reg == 0)
425      return false;
426    UpdateValueMap(I, Reg);
427    return true;
428  }
429
430  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
431  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
432  MVT DstVT = TLI.getValueType(I->getType());
433
434  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
435      DstVT == MVT::Other || !DstVT.isSimple() ||
436      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
437    // Unhandled type. Halt "fast" selection and bail.
438    return false;
439
440  unsigned Op0 = getRegForValue(I->getOperand(0));
441  if (Op0 == 0)
442    // Unhandled operand. Halt "fast" selection and bail.
443    return false;
444
445  // First, try to perform the bitcast by inserting a reg-reg copy.
446  unsigned ResultReg = 0;
447  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
448    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
449    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
450    ResultReg = createResultReg(DstClass);
451
452    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
453                                         Op0, DstClass, SrcClass);
454    if (!InsertedCopy)
455      ResultReg = 0;
456  }
457
458  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
459  if (!ResultReg)
460    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
461                           ISD::BIT_CONVERT, Op0);
462
463  if (!ResultReg)
464    return false;
465
466  UpdateValueMap(I, ResultReg);
467  return true;
468}
469
470bool
471FastISel::SelectInstruction(Instruction *I) {
472  return SelectOperator(I, I->getOpcode());
473}
474
475/// FastEmitBranch - Emit an unconditional branch to the given block,
476/// unless it is the immediate (fall-through) successor, and update
477/// the CFG.
478void
479FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
480  MachineFunction::iterator NextMBB =
481     next(MachineFunction::iterator(MBB));
482
483  if (MBB->isLayoutSuccessor(MSucc)) {
484    // The unconditional fall-through case, which needs no instructions.
485  } else {
486    // The unconditional branch case.
487    TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
488  }
489  MBB->addSuccessor(MSucc);
490}
491
492bool
493FastISel::SelectOperator(User *I, unsigned Opcode) {
494  switch (Opcode) {
495  case Instruction::Add: {
496    ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FADD : ISD::ADD;
497    return SelectBinaryOp(I, Opc);
498  }
499  case Instruction::Sub: {
500    ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FSUB : ISD::SUB;
501    return SelectBinaryOp(I, Opc);
502  }
503  case Instruction::Mul: {
504    ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FMUL : ISD::MUL;
505    return SelectBinaryOp(I, Opc);
506  }
507  case Instruction::SDiv:
508    return SelectBinaryOp(I, ISD::SDIV);
509  case Instruction::UDiv:
510    return SelectBinaryOp(I, ISD::UDIV);
511  case Instruction::FDiv:
512    return SelectBinaryOp(I, ISD::FDIV);
513  case Instruction::SRem:
514    return SelectBinaryOp(I, ISD::SREM);
515  case Instruction::URem:
516    return SelectBinaryOp(I, ISD::UREM);
517  case Instruction::FRem:
518    return SelectBinaryOp(I, ISD::FREM);
519  case Instruction::Shl:
520    return SelectBinaryOp(I, ISD::SHL);
521  case Instruction::LShr:
522    return SelectBinaryOp(I, ISD::SRL);
523  case Instruction::AShr:
524    return SelectBinaryOp(I, ISD::SRA);
525  case Instruction::And:
526    return SelectBinaryOp(I, ISD::AND);
527  case Instruction::Or:
528    return SelectBinaryOp(I, ISD::OR);
529  case Instruction::Xor:
530    return SelectBinaryOp(I, ISD::XOR);
531
532  case Instruction::GetElementPtr:
533    return SelectGetElementPtr(I);
534
535  case Instruction::Br: {
536    BranchInst *BI = cast<BranchInst>(I);
537
538    if (BI->isUnconditional()) {
539      BasicBlock *LLVMSucc = BI->getSuccessor(0);
540      MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
541      FastEmitBranch(MSucc);
542      return true;
543    }
544
545    // Conditional branches are not handled yet.
546    // Halt "fast" selection and bail.
547    return false;
548  }
549
550  case Instruction::Unreachable:
551    // Nothing to emit.
552    return true;
553
554  case Instruction::PHI:
555    // PHI nodes are already emitted.
556    return true;
557
558  case Instruction::Alloca:
559    // FunctionLowering has the static-sized case covered.
560    if (StaticAllocaMap.count(cast<AllocaInst>(I)))
561      return true;
562
563    // Dynamic-sized alloca is not handled yet.
564    return false;
565
566  case Instruction::Call:
567    return SelectCall(I);
568
569  case Instruction::BitCast:
570    return SelectBitCast(I);
571
572  case Instruction::FPToSI:
573    return SelectCast(I, ISD::FP_TO_SINT);
574  case Instruction::ZExt:
575    return SelectCast(I, ISD::ZERO_EXTEND);
576  case Instruction::SExt:
577    return SelectCast(I, ISD::SIGN_EXTEND);
578  case Instruction::Trunc:
579    return SelectCast(I, ISD::TRUNCATE);
580  case Instruction::SIToFP:
581    return SelectCast(I, ISD::SINT_TO_FP);
582
583  case Instruction::IntToPtr: // Deliberate fall-through.
584  case Instruction::PtrToInt: {
585    MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
586    MVT DstVT = TLI.getValueType(I->getType());
587    if (DstVT.bitsGT(SrcVT))
588      return SelectCast(I, ISD::ZERO_EXTEND);
589    if (DstVT.bitsLT(SrcVT))
590      return SelectCast(I, ISD::TRUNCATE);
591    unsigned Reg = getRegForValue(I->getOperand(0));
592    if (Reg == 0) return false;
593    UpdateValueMap(I, Reg);
594    return true;
595  }
596
597  default:
598    // Unhandled instruction. Halt "fast" selection and bail.
599    return false;
600  }
601}
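A small concrete mapping for the IntToPtr/PtrToInt case above, assuming 32-bit pointers so the pointer's value type is i32; the IR lines are illustrative:

    %a = ptrtoint i8* %p to i64   ; DstVT > SrcVT -> SelectCast(I, ISD::ZERO_EXTEND)
    %b = ptrtoint i8* %p to i16   ; DstVT < SrcVT -> SelectCast(I, ISD::TRUNCATE)
    %c = ptrtoint i8* %p to i32   ; same width    -> reuse %p's register via UpdateValueMap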
602
603FastISel::FastISel(MachineFunction &mf,
604                   MachineModuleInfo *mmi,
605                   DenseMap<const Value *, unsigned> &vm,
606                   DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
607                   DenseMap<const AllocaInst *, int> &am)
608  : MBB(0),
609    ValueMap(vm),
610    MBBMap(bm),
611    StaticAllocaMap(am),
612    MF(mf),
613    MMI(mmi),
614    MRI(MF.getRegInfo()),
615    MFI(*MF.getFrameInfo()),
616    MCP(*MF.getConstantPool()),
617    TM(MF.getTarget()),
618    TD(*TM.getTargetData()),
619    TII(*TM.getInstrInfo()),
620    TLI(*TM.getTargetLowering()) {
621}
622
623FastISel::~FastISel() {}
624
625unsigned FastISel::FastEmit_(MVT::SimpleValueType, MVT::SimpleValueType,
626                             ISD::NodeType) {
627  return 0;
628}
629
630unsigned FastISel::FastEmit_r(MVT::SimpleValueType, MVT::SimpleValueType,
631                              ISD::NodeType, unsigned /*Op0*/) {
632  return 0;
633}
634
635unsigned FastISel::FastEmit_rr(MVT::SimpleValueType, MVT::SimpleValueType,
636                               ISD::NodeType, unsigned /*Op0*/,
637                               unsigned /*Op1*/) {
638  return 0;
639}
640
641unsigned FastISel::FastEmit_i(MVT::SimpleValueType, MVT::SimpleValueType,
642                              ISD::NodeType, uint64_t /*Imm*/) {
643  return 0;
644}
645
646unsigned FastISel::FastEmit_f(MVT::SimpleValueType, MVT::SimpleValueType,
647                              ISD::NodeType, ConstantFP * /*FPImm*/) {
648  return 0;
649}
650
651unsigned FastISel::FastEmit_ri(MVT::SimpleValueType, MVT::SimpleValueType,
652                               ISD::NodeType, unsigned /*Op0*/,
653                               uint64_t /*Imm*/) {
654  return 0;
655}
656
657unsigned FastISel::FastEmit_rf(MVT::SimpleValueType, MVT::SimpleValueType,
658                               ISD::NodeType, unsigned /*Op0*/,
659                               ConstantFP * /*FPImm*/) {
660  return 0;
661}
662
663unsigned FastISel::FastEmit_rri(MVT::SimpleValueType, MVT::SimpleValueType,
664                                ISD::NodeType,
665                                unsigned /*Op0*/, unsigned /*Op1*/,
666                                uint64_t /*Imm*/) {
667  return 0;
668}
669
670/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
671/// to emit an instruction with an immediate operand using FastEmit_ri.
672/// If that fails, it materializes the immediate into a register and tries
673/// FastEmit_rr instead.
674unsigned FastISel::FastEmit_ri_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
675                                unsigned Op0, uint64_t Imm,
676                                MVT::SimpleValueType ImmType) {
677  // First check if immediate type is legal. If not, we can't use the ri form.
678  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
679  if (ResultReg != 0)
680    return ResultReg;
681  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
682  if (MaterialReg == 0)
683    return 0;
684  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
685}
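The GetElementPtr lowering earlier in this file leans on this wrapper; as an illustration (the register and scale values are made up), a call such as

    unsigned Scaled = FastEmit_ri_(MVT::i32, ISD::MUL, IdxN, 8, MVT::i32);

becomes a single multiply-by-immediate when the target supports one through FastEmit_ri, and otherwise costs one FastEmit_i to put 8 in a register followed by a register-register multiply.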
686
687/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
688/// to emit an instruction with a floating-point immediate operand using
689/// FastEmit_rf. If that fails, it materializes the immediate into a register
690/// and tries FastEmit_rr instead.
691unsigned FastISel::FastEmit_rf_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
692                                unsigned Op0, ConstantFP *FPImm,
693                                MVT::SimpleValueType ImmType) {
694  // First check if immediate type is legal. If not, we can't use the rf form.
695  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
696  if (ResultReg != 0)
697    return ResultReg;
698
699  // Materialize the constant in a register.
700  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
701  if (MaterialReg == 0) {
702    // If the target doesn't have a way to directly enter a floating-point
703    // value into a register, use an alternate approach.
704    // TODO: The current approach only supports floating-point constants
705    // that can be constructed by conversion from integer values. This should
706    // be replaced by code that creates a load from a constant-pool entry,
707    // which will require some target-specific work.
708    const APFloat &Flt = FPImm->getValueAPF();
709    MVT IntVT = TLI.getPointerTy();
710
711    uint64_t x[2];
712    uint32_t IntBitWidth = IntVT.getSizeInBits();
713    if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
714                             APFloat::rmTowardZero) != APFloat::opOK)
715      return 0;
716    APInt IntVal(IntBitWidth, 2, x);
717
718    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
719                                     ISD::Constant, IntVal.getZExtValue());
720    if (IntegerReg == 0)
721      return 0;
722    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
723                             ISD::SINT_TO_FP, IntegerReg);
724    if (MaterialReg == 0)
725      return 0;
726  }
727  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
728}
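A concrete illustration of the fallback above, assuming a 32-bit pointer type for IntVT and made-up values. For an operation like "fadd double %x, 3.0" on a target with no register+FP-immediate form, 3.0 converts to the integer 3 exactly, so the wrapper reduces to:

    unsigned IntReg = FastEmit_i(MVT::i32, MVT::i32, ISD::Constant, 3);
    unsigned FPReg  = FastEmit_r(MVT::i32, MVT::f64, ISD::SINT_TO_FP, IntReg);
    return FastEmit_rr(MVT::f64, MVT::f64, ISD::FADD, Op0, FPReg);

A constant such as 3.25 converts inexactly (not opOK) and the wrapper returns 0 so the caller can bail out.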
729
730unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
731  return MRI.createVirtualRegister(RC);
732}
733
734unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
735                                 const TargetRegisterClass* RC) {
736  unsigned ResultReg = createResultReg(RC);
737  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
738
739  BuildMI(MBB, II, ResultReg);
740  return ResultReg;
741}
742
743unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
744                                  const TargetRegisterClass *RC,
745                                  unsigned Op0) {
746  unsigned ResultReg = createResultReg(RC);
747  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
748
749  if (II.getNumDefs() >= 1)
750    BuildMI(MBB, II, ResultReg).addReg(Op0);
751  else {
752    BuildMI(MBB, II).addReg(Op0);
753    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
754                                         II.ImplicitDefs[0], RC, RC);
755    if (!InsertedCopy)
756      ResultReg = 0;
757  }
758
759  return ResultReg;
760}
761
762unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
763                                   const TargetRegisterClass *RC,
764                                   unsigned Op0, unsigned Op1) {
765  unsigned ResultReg = createResultReg(RC);
766  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
767
768  if (II.getNumDefs() >= 1)
769    BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1);
770  else {
771    BuildMI(MBB, II).addReg(Op0).addReg(Op1);
772    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
773                                         II.ImplicitDefs[0], RC, RC);
774    if (!InsertedCopy)
775      ResultReg = 0;
776  }
777  return ResultReg;
778}
779
780unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
781                                   const TargetRegisterClass *RC,
782                                   unsigned Op0, uint64_t Imm) {
783  unsigned ResultReg = createResultReg(RC);
784  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
785
786  if (II.getNumDefs() >= 1)
787    BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Imm);
788  else {
789    BuildMI(MBB, II).addReg(Op0).addImm(Imm);
790    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
791                                         II.ImplicitDefs[0], RC, RC);
792    if (!InsertedCopy)
793      ResultReg = 0;
794  }
795  return ResultReg;
796}
797
798unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
799                                   const TargetRegisterClass *RC,
800                                   unsigned Op0, ConstantFP *FPImm) {
801  unsigned ResultReg = createResultReg(RC);
802  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
803
804  if (II.getNumDefs() >= 1)
805    BuildMI(MBB, II, ResultReg).addReg(Op0).addFPImm(FPImm);
806  else {
807    BuildMI(MBB, II).addReg(Op0).addFPImm(FPImm);
808    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
809                                         II.ImplicitDefs[0], RC, RC);
810    if (!InsertedCopy)
811      ResultReg = 0;
812  }
813  return ResultReg;
814}
815
816unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
817                                    const TargetRegisterClass *RC,
818                                    unsigned Op0, unsigned Op1, uint64_t Imm) {
819  unsigned ResultReg = createResultReg(RC);
820  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
821
822  if (II.getNumDefs() >= 1)
823    BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
824  else {
825    BuildMI(MBB, II).addReg(Op0).addReg(Op1).addImm(Imm);
826    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
827                                         II.ImplicitDefs[0], RC, RC);
828    if (!InsertedCopy)
829      ResultReg = 0;
830  }
831  return ResultReg;
832}
833
834unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
835                                  const TargetRegisterClass *RC,
836                                  uint64_t Imm) {
837  unsigned ResultReg = createResultReg(RC);
838  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
839
840  if (II.getNumDefs() >= 1)
841    BuildMI(MBB, II, ResultReg).addImm(Imm);
842  else {
843    BuildMI(MBB, II).addImm(Imm);
844    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
845                                         II.ImplicitDefs[0], RC, RC);
846    if (!InsertedCopy)
847      ResultReg = 0;
848  }
849  return ResultReg;
850}
851
852unsigned FastISel::FastEmitInst_extractsubreg(unsigned Op0, uint32_t Idx) {
853  const TargetRegisterClass* RC = MRI.getRegClass(Op0);
854  const TargetRegisterClass* SRC = *(RC->subregclasses_begin()+Idx-1);
855
856  unsigned ResultReg = createResultReg(SRC);
857  const TargetInstrDesc &II = TII.get(TargetInstrInfo::EXTRACT_SUBREG);
858
859  if (II.getNumDefs() >= 1)
860    BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Idx);
861  else {
862    BuildMI(MBB, II).addReg(Op0).addImm(Idx);
863    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
864                                         II.ImplicitDefs[0], RC, RC);
865    if (!InsertedCopy)
866      ResultReg = 0;
867  }
868  return ResultReg;
869}
870