FastISel.cpp revision 29e8a67092e97687732a760db9a45094e5a63a7c
//===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported.  It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time.  For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support.  In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated.  Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time.  Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators.  More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(I->use_begin())->getParent() == I->getParent();
}

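// For example, given IR such as
//
//   %x = add i32 %a, %b
//   %y = mul i32 %x, %c      ; sole use of %x, in the same block
//
// hasTrivialKill(%x) returns true, so the instruction emitted for %y may
// mark %x's register as killed. If the use were in another block, or %x
// were a no-op cast of a value without a trivial kill, it would return
// false.
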
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (IsBottomUp) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    if (isa<Instruction>(V))
      FuncInfo.ValueMap[V] = Reg;
    else
      LocalValueMap[V] = Reg;
    return Reg;
  }

  return materializeRegForValue(V, VT);
}

/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    // Try to emit the constant directly.
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0)
    LocalValueMap[V] = Reg;
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value.  It might be
/// possible to fix this by selecting blocks in reverse postorder.
unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return Reg;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
    TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, AssignedReg,
                     Reg, RegClass, RegClass, DL);
  }
  return AssignedReg;
}

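// For example: if a block that uses %v is selected before the block that
// defines %v, getRegForValue will already have created a virtual register
// R for %v. When the defining instruction is later selected into some
// other register R2, UpdateValueMap emits a copy from R2 into R instead
// of remapping, so the uses of R that were already emitted stay correct.
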
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

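// For example, on a target with 64-bit pointers an i32 GEP index is
// widened here with ISD::SIGN_EXTEND (GEP indices are signed), and an
// i128 index is narrowed with ISD::TRUNCATE, so the address arithmetic
// in SelectGetElementPtr always happens at pointer width.
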
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// whose opcode directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill,
                                     CI->getZExtValue());
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

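// For example, for "%r = add i32 %x, 42" the constant path above tries
// FastEmit_ri(MVT::i32, MVT::i32, ISD::ADD, Op0, Op0IsKill, 42), folding
// the immediate into the instruction; only if the target has no such
// pattern does 42 get materialized into a register for the
// register-register form.
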
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  const Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        uint64_t Offs = TD.getTypeAllocSize(Ty)*CI->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

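// For example, "%p = getelementptr i32* %b, i64 %i" decomposes into
// roughly
//
//   IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ..., /*ElementSize=*/4, VT)
//   N    = FastEmit_rr(VT, VT, ISD::ADD, N, ..., IdxN, ...)
//
// while a constant index is folded into a single ISD::ADD of the
// precomputed byte offset.
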
bool FastISel::SelectCall(const User *I) {
  const Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address)
      return true;
    if (isa<UndefValue>(Address))
      return true;
    const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
    // Don't handle byval struct arguments or VLAs, for example.
    // Note that if we have a byval struct argument, fast ISel is turned off;
    // those are handled in SelectionDAGBuilder.
    if (AI) {
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI == FuncInfo.StaticAllocaMap.end()) break; // VLAs.
      int FI = SI->second;
      if (!DI->getDebugLoc().isUnknown())
        FuncInfo.MF->getMMI().setVariableDbgInfo(DI->getVariable(),
                                                 FI, DI->getDebugLoc());
    } else
      // Building the map above is target independent.  Generating DBG_VALUE
      // inline is target dependent; do this now.
      (void)TargetSelectInstruction(cast<Instruction>(I));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(I);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging.  Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      // Insert an undef so we can see what we dropped.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      assert(FuncInfo.MBB->isLandingPad() &&
             "Call to eh.exception not in landing pad!");
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                           ResultReg, Reg, RC, RC, DL);
      assert(InsertedCopy && "Can't copy address registers!");
      (void)InsertedCopy; // Silence unused-variable warnings.
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      if (FuncInfo.MBB->isLandingPad())
        AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
      else {
#ifndef NDEBUG
        FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
#endif
        // FIXME: Mark exception selector register as live in.  Hack for PR1508.
        unsigned Reg = TLI.getExceptionSelectorRegister();
        if (Reg) FuncInfo.MBB->addLiveIn(Reg);
      }

      unsigned Reg = TLI.getExceptionSelectorRegister();
      EVT SrcVT = TLI.getPointerTy();
      const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
      unsigned ResultReg = createResultReg(RC);
      bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                           ResultReg, Reg, RC, RC, DL);
      assert(InsertedCopy && "Can't copy address registers!");
      (void)InsertedCopy; // Silence unused-variable warnings.

      bool ResultRegIsKill = hasTrivialKill(I);

      // Cast the register to the type of the selector.
      if (SrcVT.bitsGT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                               ResultReg, ResultRegIsKill);
      else if (SrcVT.bitsLT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                               ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
      if (ResultReg == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      UpdateValueMap(I, ResultReg);

      return true;
    }
    }
    break;
  }
  }

  // An arbitrary call. Bail.
  return false;
}

bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand.  Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg,
                                  InputRegIsKill);
    if (!InputReg)
      return false;
    InputRegIsKill = true;
  }
  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    ResultReg = createResultReg(DstClass);

    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, Op0,
                                         DstClass, SrcClass, DL);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BIT_CONVERT, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BIT_CONVERT, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BIT_CONVERT, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

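// The fallback above amounts to, for a double %x:
//
//   bits   = bitcast double %x to i64
//   bits   = xor i64 bits, 0x8000000000000000   ; flip the IEEE sign bit
//   result = bitcast i64 bits to double
//
// which is why types wider than 64 bits, or types whose same-width
// integer type isn't legal, are rejected.
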
bool
FastISel::SelectLoad(const User *I) {
  LoadInst *LI = const_cast<LoadInst *>(cast<LoadInst>(I));

  // For a load from an alloca, make a limited effort to find the value
  // already available in a register, avoiding redundant loads.
  if (!LI->isVolatile() && isa<AllocaInst>(LI->getPointerOperand())) {
    BasicBlock::iterator ScanFrom = LI;
    if (const Value *V = FindAvailableLoadedValue(LI->getPointerOperand(),
                                                  LI->getParent(), ScanFrom)) {
      unsigned ResultReg = getRegForValue(V);
      if (ResultReg != 0) {
        UpdateValueMap(I, ResultReg);
        return true;
      }
    }
  }

  return false;
}

bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Load:
    return SelectLoad(I);
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()),
    IsBottomUp(false) {
}

FastISel::~FastISel() {}

unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper around FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

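// For example, FastEmit_ri_(MVT::i64, ISD::ADD, N, NIsKill, Offs,
// MVT::i64), as used by SelectGetElementPtr, first tries an
// add-with-immediate; if the target has no matching pattern for that
// immediate, the offset is materialized via ISD::Constant and a plain
// register-register add is emitted instead.
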
/// FastEmit_rf_ - This method is a wrapper around FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                const ConstantFP *FPImm, MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, Op0IsKill, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    EVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg, /*Kill=*/true);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, II.ImplicitDefs[0],
                                         RC, RC, DL);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, II.ImplicitDefs[0],
                                         RC, RC, DL);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, II.ImplicitDefs[0],
                                         RC, RC, DL);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, II.ImplicitDefs[0],
                                         RC, RC, DL);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, II.ImplicitDefs[0],
                                         RC, RC, DL);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, II.ImplicitDefs[0],
                                         RC, RC, DL);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  const TargetInstrDesc &II = TII.get(TargetOpcode::EXTRACT_SUBREG);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Idx);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Idx);
    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, II.ImplicitDefs[0],
                                         RC, RC, DL);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

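// Since only the low bit of an i1 register is meaningful, "zext i1 %b
// to i32", for example, becomes an AND with 1, clearing the undefined
// high bits without needing a dedicated zero-extend pattern.
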
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input.  We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB.  As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead PHI nodes.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}