FastISel.cpp revision 1dac4614d6666137fab13240cbd1988227389164
1//===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the implementation of the FastISel class.
11//
12// "Fast" instruction selection is designed to emit very poor code quickly.
13// Also, it is not designed to be able to do much lowering, so most illegal
14// types (e.g. i64 on 32-bit targets) and operations are not supported.  It is
15// also not intended to be able to do much optimization, except in a few cases
16// where doing optimizations reduces overall compile time.  For example, folding
17// constants into immediate fields is often done, because it's cheap and it
18// reduces the number of instructions later phases have to examine.
19//
20// "Fast" instruction selection is able to fail gracefully and transfer
21// control to the SelectionDAG selector for operations that it doesn't
22// support.  In many cases, this allows us to avoid duplicating a lot of
23// the complicated lowering logic that SelectionDAG currently has.
24//
25// The intended use for "fast" instruction selection is "-O0" mode
26// compilation, where the quality of the generated code is irrelevant when
27// weighed against the speed at which the code can be generated.  Also,
28// at -O0, the LLVM optimizers are not running, and this makes the
29// compile time of codegen a much higher portion of the overall compile
30// time.  Despite its limitations, "fast" instruction selection is able to
31// handle enough code on its own to provide noticeable overall speedups
32// in -O0 compiles.
33//
34// Basic operations are supported in a target-independent way, by reading
35// the same instruction descriptions that the SelectionDAG selector reads,
36// and identifying simple arithmetic operations that can be directly selected
37// from simple operators.  More complicated operations currently require
38// target-specific code.
39//
40//===----------------------------------------------------------------------===//
41
42#include "llvm/Function.h"
43#include "llvm/GlobalVariable.h"
44#include "llvm/Instructions.h"
45#include "llvm/IntrinsicInst.h"
46#include "llvm/CodeGen/FastISel.h"
47#include "llvm/CodeGen/FunctionLoweringInfo.h"
48#include "llvm/CodeGen/MachineInstrBuilder.h"
49#include "llvm/CodeGen/MachineModuleInfo.h"
50#include "llvm/CodeGen/MachineRegisterInfo.h"
51#include "llvm/Analysis/DebugInfo.h"
52#include "llvm/Analysis/Loads.h"
53#include "llvm/Target/TargetData.h"
54#include "llvm/Target/TargetInstrInfo.h"
55#include "llvm/Target/TargetLowering.h"
56#include "llvm/Target/TargetMachine.h"
57#include "llvm/Support/ErrorHandling.h"
58using namespace llvm;
59
60/// startNewBlock - Set the current block to which generated machine
61/// instructions will be appended, and clear the local CSE map.
62///
63void FastISel::startNewBlock() {
64  LocalValueMap.clear();
65
66  // Start out as null, meaining no local-value instructions have
67  // been emitted.
68  LastLocalValue = 0;
69
70  // Advance the last local value past any EH_LABEL instructions.
71  MachineBasicBlock::iterator
72    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
73  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
74    LastLocalValue = I;
75    ++I;
76  }
77}
78
79bool FastISel::hasTrivialKill(const Value *V) const {
80  // Don't consider constants or arguments to have trivial kills.
81  const Instruction *I = dyn_cast<Instruction>(V);
82  if (!I)
83    return false;
84
85  // No-op casts are trivially coalesced by fast-isel.
86  if (const CastInst *Cast = dyn_cast<CastInst>(I))
87    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
88        !hasTrivialKill(Cast->getOperand(0)))
89      return false;
90
91  // Only instructions with a single use in the same basic block are considered
92  // to have trivial kills.
93  return I->hasOneUse() &&
94         !(I->getOpcode() == Instruction::BitCast ||
95           I->getOpcode() == Instruction::PtrToInt ||
96           I->getOpcode() == Instruction::IntToPtr) &&
97         cast<Instruction>(I->use_begin())->getParent() == I->getParent();
98}
99
100unsigned FastISel::getRegForValue(const Value *V) {
101  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
102  // Don't handle non-simple values in FastISel.
103  if (!RealVT.isSimple())
104    return 0;
105
106  // Ignore illegal types. We must do this before looking up the value
107  // in ValueMap because Arguments are given virtual registers regardless
108  // of whether FastISel can handle them.
109  MVT VT = RealVT.getSimpleVT();
110  if (!TLI.isTypeLegal(VT)) {
111    // Promote MVT::i1 to a legal type though, because it's common and easy.
112    if (VT == MVT::i1)
113      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
114    else
115      return 0;
116  }
117
118  // Look up the value to see if we already have a register for it. We
119  // cache values defined by Instructions across blocks, and other values
120  // only locally. This is because Instructions already have the SSA
121  // def-dominates-use requirement enforced.
122  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
123  if (I != FuncInfo.ValueMap.end()) {
124    unsigned Reg = I->second;
125    return Reg;
126  }
127  unsigned Reg = LocalValueMap[V];
128  if (Reg != 0)
129    return Reg;
130
131  // In bottom-up mode, just create the virtual register which will be used
132  // to hold the value. It will be materialized later.
133  if (isa<Instruction>(V) &&
134      (!isa<AllocaInst>(V) ||
135       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
136    return FuncInfo.InitializeRegForValue(V);
137
138  MachineBasicBlock::iterator SaveInsertPt = enterLocalValueArea();
139
140  // Materialize the value in a register. Emit any instructions in the
141  // local value area.
142  Reg = materializeRegForValue(V, VT);
143
144  leaveLocalValueArea(SaveInsertPt);
145
146  return Reg;
147}
148
/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // Integer constants that fit in 64 bits are emitted directly as an
    // immediate; wider constants fall through to the target hook below.
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    // Try to emit the constant directly.
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      // Only use the integer path when the conversion loses nothing, i.e.
      // the float value round-trips exactly through the integer type.
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    // Constant expressions and other Operators are selected like ordinary
    // instructions; the resulting register is looked up afterwards.
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    // Undef becomes an IMPLICIT_DEF of a fresh register.
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}
214
215unsigned FastISel::lookUpRegForValue(const Value *V) {
216  // Look up the value to see if we already have a register for it. We
217  // cache values defined by Instructions across blocks, and other values
218  // only locally. This is because Instructions already have the SSA
219  // def-dominates-use requirement enforced.
220  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
221  if (I != FuncInfo.ValueMap.end())
222    return I->second;
223  return LocalValueMap[V];
224}
225
226/// UpdateValueMap - Update the value map to include the new mapping for this
227/// instruction, or insert an extra copy to get the result in a previous
228/// determined register.
229/// NOTE: This is only necessary because we might select a block that uses
230/// a value before we select the block that defines the value.  It might be
231/// possible to fix this by selecting blocks in reverse postorder.
232unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
233  if (!isa<Instruction>(I)) {
234    LocalValueMap[I] = Reg;
235    return Reg;
236  }
237
238  unsigned &AssignedReg = FuncInfo.ValueMap[I];
239  if (AssignedReg == 0)
240    // Use the new register.
241    AssignedReg = Reg;
242  else if (Reg != AssignedReg) {
243    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
244    FuncInfo.RegFixups[AssignedReg] = Reg;
245
246    AssignedReg = Reg;
247  }
248
249  return AssignedReg;
250}
251
252std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
253  unsigned IdxN = getRegForValue(Idx);
254  if (IdxN == 0)
255    // Unhandled operand. Halt "fast" selection and bail.
256    return std::pair<unsigned, bool>(0, false);
257
258  bool IdxNIsKill = hasTrivialKill(Idx);
259
260  // If the index is smaller or larger than intptr_t, truncate or extend it.
261  MVT PtrVT = TLI.getPointerTy();
262  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
263  if (IdxVT.bitsLT(PtrVT)) {
264    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
265                      IdxN, IdxNIsKill);
266    IdxNIsKill = true;
267  }
268  else if (IdxVT.bitsGT(PtrVT)) {
269    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
270                      IdxN, IdxNIsKill);
271    IdxNIsKill = true;
272  }
273  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
274}
275
276void FastISel::recomputeInsertPt() {
277  if (getLastLocalValue()) {
278    FuncInfo.InsertPt = getLastLocalValue();
279    ++FuncInfo.InsertPt;
280  } else
281    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
282
283  // Now skip past any EH_LABELs, which must remain at the beginning.
284  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
285         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
286    ++FuncInfo.InsertPt;
287}
288
289MachineBasicBlock::iterator FastISel::enterLocalValueArea() {
290  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
291  recomputeInsertPt();
292  return OldInsertPt;
293}
294
295void FastISel::leaveLocalValueArea(MachineBasicBlock::iterator OldInsertPt) {
296  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
297    LastLocalValue = llvm::prior(FuncInfo.InsertPt);
298
299  // Restore the previous insert position.
300  FuncInfo.InsertPt = OldInsertPt;
301}
302
303/// SelectBinaryOp - Select and emit code for a binary operator instruction,
304/// which has an opcode which directly corresponds to the given ISD opcode.
305///
306bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
307  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
308  if (VT == MVT::Other || !VT.isSimple())
309    // Unhandled type. Halt "fast" selection and bail.
310    return false;
311
312  // We only handle legal types. For example, on x86-32 the instruction
313  // selector contains all of the 64-bit instructions from x86-64,
314  // under the assumption that i64 won't be used if the target doesn't
315  // support it.
316  if (!TLI.isTypeLegal(VT)) {
317    // MVT::i1 is special. Allow AND, OR, or XOR because they
318    // don't require additional zeroing, which makes them easy.
319    if (VT == MVT::i1 &&
320        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
321         ISDOpcode == ISD::XOR))
322      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
323    else
324      return false;
325  }
326
327  unsigned Op0 = getRegForValue(I->getOperand(0));
328  if (Op0 == 0)
329    // Unhandled operand. Halt "fast" selection and bail.
330    return false;
331
332  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
333
334  // Check if the second operand is a constant and handle it appropriately.
335  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
336    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
337                                     ISDOpcode, Op0, Op0IsKill,
338                                     CI->getZExtValue());
339    if (ResultReg != 0) {
340      // We successfully emitted code for the given LLVM Instruction.
341      UpdateValueMap(I, ResultReg);
342      return true;
343    }
344  }
345
346  // Check if the second operand is a constant float.
347  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
348    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
349                                     ISDOpcode, Op0, Op0IsKill, CF);
350    if (ResultReg != 0) {
351      // We successfully emitted code for the given LLVM Instruction.
352      UpdateValueMap(I, ResultReg);
353      return true;
354    }
355  }
356
357  unsigned Op1 = getRegForValue(I->getOperand(1));
358  if (Op1 == 0)
359    // Unhandled operand. Halt "fast" selection and bail.
360    return false;
361
362  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
363
364  // Now we have both operands in registers. Emit the instruction.
365  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
366                                   ISDOpcode,
367                                   Op0, Op0IsKill,
368                                   Op1, Op1IsKill);
369  if (ResultReg == 0)
370    // Target-specific code wasn't able to find a machine opcode for
371    // the given ISD opcode and type. Halt "fast" selection and bail.
372    return false;
373
374  // We successfully emitted code for the given LLVM Instruction.
375  UpdateValueMap(I, ResultReg);
376  return true;
377}
378
379bool FastISel::SelectGetElementPtr(const User *I) {
380  unsigned N = getRegForValue(I->getOperand(0));
381  if (N == 0)
382    // Unhandled operand. Halt "fast" selection and bail.
383    return false;
384
385  bool NIsKill = hasTrivialKill(I->getOperand(0));
386
387  const Type *Ty = I->getOperand(0)->getType();
388  MVT VT = TLI.getPointerTy();
389  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
390       E = I->op_end(); OI != E; ++OI) {
391    const Value *Idx = *OI;
392    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
393      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
394      if (Field) {
395        // N = N + Offset
396        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
397        // FIXME: This can be optimized by combining the add with a
398        // subsequent one.
399        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
400        if (N == 0)
401          // Unhandled operand. Halt "fast" selection and bail.
402          return false;
403        NIsKill = true;
404      }
405      Ty = StTy->getElementType(Field);
406    } else {
407      Ty = cast<SequentialType>(Ty)->getElementType();
408
409      // If this is a constant subscript, handle it quickly.
410      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
411        if (CI->isZero()) continue;
412        uint64_t Offs =
413          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
414        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
415        if (N == 0)
416          // Unhandled operand. Halt "fast" selection and bail.
417          return false;
418        NIsKill = true;
419        continue;
420      }
421
422      // N = N + Idx * ElementSize;
423      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
424      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
425      unsigned IdxN = Pair.first;
426      bool IdxNIsKill = Pair.second;
427      if (IdxN == 0)
428        // Unhandled operand. Halt "fast" selection and bail.
429        return false;
430
431      if (ElementSize != 1) {
432        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
433        if (IdxN == 0)
434          // Unhandled operand. Halt "fast" selection and bail.
435          return false;
436        IdxNIsKill = true;
437      }
438      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
439      if (N == 0)
440        // Unhandled operand. Halt "fast" selection and bail.
441        return false;
442    }
443  }
444
445  // We successfully emitted code for the given LLVM Instruction.
446  UpdateValueMap(I, N);
447  return true;
448}
449
450bool FastISel::SelectCall(const User *I) {
451  const Function *F = cast<CallInst>(I)->getCalledFunction();
452  if (!F) return false;
453
454  // Handle selected intrinsic function calls.
455  unsigned IID = F->getIntrinsicID();
456  switch (IID) {
457  default: break;
458  case Intrinsic::dbg_declare: {
459    const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
460    if (!DIVariable(DI->getVariable()).Verify() ||
461        !FuncInfo.MF->getMMI().hasDebugInfo())
462      return true;
463
464    const Value *Address = DI->getAddress();
465    if (!Address)
466      return true;
467    if (isa<UndefValue>(Address))
468      return true;
469    const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
470    // Don't handle byval struct arguments or VLAs, for example.
471    // Note that if we have a byval struct argument, fast ISel is turned off;
472    // those are handled in SelectionDAGBuilder.
473    if (AI) {
474      DenseMap<const AllocaInst*, int>::iterator SI =
475        FuncInfo.StaticAllocaMap.find(AI);
476      if (SI == FuncInfo.StaticAllocaMap.end()) break; // VLAs.
477      int FI = SI->second;
478      if (!DI->getDebugLoc().isUnknown())
479        FuncInfo.MF->getMMI().setVariableDbgInfo(DI->getVariable(),
480                                                 FI, DI->getDebugLoc());
481    } else
482      // Building the map above is target independent.  Generating DBG_VALUE
483      // inline is target dependent; do this now.
484      (void)TargetSelectInstruction(cast<Instruction>(I));
485    return true;
486  }
487  case Intrinsic::dbg_value: {
488    // This form of DBG_VALUE is target-independent.
489    const DbgValueInst *DI = cast<DbgValueInst>(I);
490    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
491    const Value *V = DI->getValue();
492    if (!V) {
493      // Currently the optimizer can produce this; insert an undef to
494      // help debugging.  Probably the optimizer should not do this.
495      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
496        .addReg(0U).addImm(DI->getOffset())
497        .addMetadata(DI->getVariable());
498    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
499      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
500        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
501        .addMetadata(DI->getVariable());
502    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
503      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
504        .addFPImm(CF).addImm(DI->getOffset())
505        .addMetadata(DI->getVariable());
506    } else if (unsigned Reg = lookUpRegForValue(V)) {
507      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
508        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
509        .addMetadata(DI->getVariable());
510    } else {
511      // We can't yet handle anything else here because it would require
512      // generating code, thus altering codegen because of debug info.
513      // Insert an undef so we can see what we dropped.
514      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
515        .addReg(0U).addImm(DI->getOffset())
516        .addMetadata(DI->getVariable());
517    }
518    return true;
519  }
520  case Intrinsic::eh_exception: {
521    EVT VT = TLI.getValueType(I->getType());
522    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
523    default: break;
524    case TargetLowering::Expand: {
525      assert(FuncInfo.MBB->isLandingPad() &&
526             "Call to eh.exception not in landing pad!");
527      unsigned Reg = TLI.getExceptionAddressRegister();
528      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
529      unsigned ResultReg = createResultReg(RC);
530      bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
531                                           ResultReg, Reg, RC, RC, DL);
532      assert(InsertedCopy && "Can't copy address registers!");
533      InsertedCopy = InsertedCopy;
534      UpdateValueMap(I, ResultReg);
535      return true;
536    }
537    }
538    break;
539  }
540  case Intrinsic::eh_selector: {
541    EVT VT = TLI.getValueType(I->getType());
542    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
543    default: break;
544    case TargetLowering::Expand: {
545      if (FuncInfo.MBB->isLandingPad())
546        AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
547      else {
548#ifndef NDEBUG
549        FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
550#endif
551        // FIXME: Mark exception selector register as live in.  Hack for PR1508.
552        unsigned Reg = TLI.getExceptionSelectorRegister();
553        if (Reg) FuncInfo.MBB->addLiveIn(Reg);
554      }
555
556      unsigned Reg = TLI.getExceptionSelectorRegister();
557      EVT SrcVT = TLI.getPointerTy();
558      const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
559      unsigned ResultReg = createResultReg(RC);
560      bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
561                                           ResultReg, Reg, RC, RC, DL);
562      assert(InsertedCopy && "Can't copy address registers!");
563      InsertedCopy = InsertedCopy;
564
565      bool ResultRegIsKill = hasTrivialKill(I);
566
567      // Cast the register to the type of the selector.
568      if (SrcVT.bitsGT(MVT::i32))
569        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
570                               ResultReg, ResultRegIsKill);
571      else if (SrcVT.bitsLT(MVT::i32))
572        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
573                               ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
574      if (ResultReg == 0)
575        // Unhandled operand. Halt "fast" selection and bail.
576        return false;
577
578      UpdateValueMap(I, ResultReg);
579
580      return true;
581    }
582    }
583    break;
584  }
585  }
586
587  // An arbitrary call. Bail.
588  return false;
589}
590
591bool FastISel::SelectCast(const User *I, unsigned Opcode) {
592  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
593  EVT DstVT = TLI.getValueType(I->getType());
594
595  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
596      DstVT == MVT::Other || !DstVT.isSimple())
597    // Unhandled type. Halt "fast" selection and bail.
598    return false;
599
600  // Check if the destination type is legal. Or as a special case,
601  // it may be i1 if we're doing a truncate because that's
602  // easy and somewhat common.
603  if (!TLI.isTypeLegal(DstVT))
604    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
605      // Unhandled type. Halt "fast" selection and bail.
606      return false;
607
608  // Check if the source operand is legal. Or as a special case,
609  // it may be i1 if we're doing zero-extension because that's
610  // easy and somewhat common.
611  if (!TLI.isTypeLegal(SrcVT))
612    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
613      // Unhandled type. Halt "fast" selection and bail.
614      return false;
615
616  unsigned InputReg = getRegForValue(I->getOperand(0));
617  if (!InputReg)
618    // Unhandled operand.  Halt "fast" selection and bail.
619    return false;
620
621  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
622
623  // If the operand is i1, arrange for the high bits in the register to be zero.
624  if (SrcVT == MVT::i1) {
625   SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
626   InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg, InputRegIsKill);
627   if (!InputReg)
628     return false;
629   InputRegIsKill = true;
630  }
631  // If the result is i1, truncate to the target's type for i1 first.
632  if (DstVT == MVT::i1)
633    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);
634
635  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
636                                  DstVT.getSimpleVT(),
637                                  Opcode,
638                                  InputReg, InputRegIsKill);
639  if (!ResultReg)
640    return false;
641
642  UpdateValueMap(I, ResultReg);
643  return true;
644}
645
646bool FastISel::SelectBitCast(const User *I) {
647  // If the bitcast doesn't change the type, just use the operand value.
648  if (I->getType() == I->getOperand(0)->getType()) {
649    unsigned Reg = getRegForValue(I->getOperand(0));
650    if (Reg == 0)
651      return false;
652    UpdateValueMap(I, Reg);
653    return true;
654  }
655
656  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
657  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
658  EVT DstVT = TLI.getValueType(I->getType());
659
660  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
661      DstVT == MVT::Other || !DstVT.isSimple() ||
662      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
663    // Unhandled type. Halt "fast" selection and bail.
664    return false;
665
666  unsigned Op0 = getRegForValue(I->getOperand(0));
667  if (Op0 == 0)
668    // Unhandled operand. Halt "fast" selection and bail.
669    return false;
670
671  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
672
673  // First, try to perform the bitcast by inserting a reg-reg copy.
674  unsigned ResultReg = 0;
675  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
676    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
677    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
678    ResultReg = createResultReg(DstClass);
679
680    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
681                                         ResultReg, Op0,
682                                         DstClass, SrcClass, DL);
683    if (!InsertedCopy)
684      ResultReg = 0;
685  }
686
687  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
688  if (!ResultReg)
689    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
690                           ISD::BIT_CONVERT, Op0, Op0IsKill);
691
692  if (!ResultReg)
693    return false;
694
695  UpdateValueMap(I, ResultReg);
696  return true;
697}
698
699bool
700FastISel::SelectInstruction(const Instruction *I) {
701  // Just before the terminator instruction, insert instructions to
702  // feed PHI nodes in successor blocks.
703  if (isa<TerminatorInst>(I))
704    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
705      return false;
706
707  DL = I->getDebugLoc();
708
709  // First, try doing target-independent selection.
710  if (SelectOperator(I, I->getOpcode())) {
711    DL = DebugLoc();
712    return true;
713  }
714
715  // Next, try calling the target to attempt to handle the instruction.
716  if (TargetSelectInstruction(I)) {
717    DL = DebugLoc();
718    return true;
719  }
720
721  DL = DebugLoc();
722  return false;
723}
724
725/// FastEmitBranch - Emit an unconditional branch to the given block,
726/// unless it is the immediate (fall-through) successor, and update
727/// the CFG.
728void
729FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
730  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
731    // The unconditional fall-through case, which needs no instructions.
732  } else {
733    // The unconditional branch case.
734    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
735                     SmallVector<MachineOperand, 0>(), DL);
736  }
737  FuncInfo.MBB->addSuccessor(MSucc);
738}
739
740/// SelectFNeg - Emit an FNeg operation.
741///
742bool
743FastISel::SelectFNeg(const User *I) {
744  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
745  if (OpReg == 0) return false;
746
747  bool OpRegIsKill = hasTrivialKill(I);
748
749  // If the target has ISD::FNEG, use it.
750  EVT VT = TLI.getValueType(I->getType());
751  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
752                                  ISD::FNEG, OpReg, OpRegIsKill);
753  if (ResultReg != 0) {
754    UpdateValueMap(I, ResultReg);
755    return true;
756  }
757
758  // Bitcast the value to integer, twiddle the sign bit with xor,
759  // and then bitcast it back to floating-point.
760  if (VT.getSizeInBits() > 64) return false;
761  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
762  if (!TLI.isTypeLegal(IntVT))
763    return false;
764
765  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
766                               ISD::BIT_CONVERT, OpReg, OpRegIsKill);
767  if (IntReg == 0)
768    return false;
769
770  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
771                                       IntReg, /*Kill=*/true,
772                                       UINT64_C(1) << (VT.getSizeInBits()-1),
773                                       IntVT.getSimpleVT());
774  if (IntResultReg == 0)
775    return false;
776
777  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
778                         ISD::BIT_CONVERT, IntResultReg, /*Kill=*/true);
779  if (ResultReg == 0)
780    return false;
781
782  UpdateValueMap(I, ResultReg);
783  return true;
784}
785
786bool
787FastISel::SelectLoad(const User *I) {
788  LoadInst *LI = const_cast<LoadInst *>(cast<LoadInst>(I));
789
790  // For a load from an alloca, make a limited effort to find the value
791  // already available in a register, avoiding redundant loads.
792  if (!LI->isVolatile() && isa<AllocaInst>(LI->getPointerOperand())) {
793    BasicBlock::iterator ScanFrom = LI;
794    if (const Value *V = FindAvailableLoadedValue(LI->getPointerOperand(),
795                                                  LI->getParent(), ScanFrom)) {
796      if (!V->use_empty() &&
797          (!isa<Instruction>(V) ||
798           cast<Instruction>(V)->getParent() == LI->getParent() ||
799           (isa<AllocaInst>(V) &&
800            FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V)))) &&
801          (!isa<Argument>(V) ||
802           LI->getParent() == &LI->getParent()->getParent()->getEntryBlock())) {
803      unsigned ResultReg = getRegForValue(V);
804      if (ResultReg != 0) {
805        UpdateValueMap(I, ResultReg);
806        return true;
807      }
808      }
809    }
810  }
811
812  return false;
813}
814
/// SelectOperator - Lower an LLVM instruction (identified by Opcode) using
/// target-independent FastISel logic. Returns true on success; returning
/// false halts "fast" selection so the SelectionDAG selector can take over.
bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Load:
    return SelectLoad(I);
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    // Lower int<->ptr conversions as extend, truncate, or (when the sizes
    // match) a simple reuse of the operand's register.
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::PHI:
    // PHIs are handled separately in HandlePHINodesInSuccessorBlocks.
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}
929
/// FastISel constructor - Cache references to the per-function state
/// (register info, frame info, constant pool) and to the target's
/// data-layout, instruction, lowering, and register info objects.
FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}
941
// Nothing to release explicitly; FastISel holds only references to state
// owned elsewhere.
FastISel::~FastISel() {}
943
/// FastEmit_ - Default no-operand emitter: returns 0 ("opcode not
/// supported"). Targets override this to emit a real instruction.
unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}
948
/// FastEmit_r - Default one-register-operand emitter: returns 0 ("not
/// supported"). Targets override this.
unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}
954
/// FastEmit_rr - Default two-register-operand emitter: returns 0 ("not
/// supported"). Targets override this.
unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}
961
/// FastEmit_i - Default immediate-operand emitter: returns 0 ("not
/// supported"). Targets override this.
unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}
965
/// FastEmit_f - Default floating-point-immediate emitter: returns 0 ("not
/// supported"). Targets override this.
unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}
970
/// FastEmit_ri - Default register+immediate emitter: returns 0 ("not
/// supported"). Targets override this.
unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}
977
/// FastEmit_rf - Default register+FP-immediate emitter: returns 0 ("not
/// supported"). Targets override this.
unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}
984
/// FastEmit_rri - Default register+register+immediate emitter: returns 0
/// ("not supported"). Targets override this.
unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}
992
993/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
994/// to emit an instruction with an immediate operand using FastEmit_ri.
995/// If that fails, it materializes the immediate into a register and try
996/// FastEmit_rr instead.
997unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
998                                unsigned Op0, bool Op0IsKill,
999                                uint64_t Imm, MVT ImmType) {
1000  // First check if immediate type is legal. If not, we can't use the ri form.
1001  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
1002  if (ResultReg != 0)
1003    return ResultReg;
1004  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1005  if (MaterialReg == 0)
1006    return 0;
1007  return FastEmit_rr(VT, VT, Opcode,
1008                     Op0, Op0IsKill,
1009                     MaterialReg, /*Kill=*/true);
1010}
1011
/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                const ConstantFP *FPImm, MVT ImmType) {
  // Try the form with the constant folded into the instruction first.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, Op0IsKill, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    EVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    // Only proceed if the constant converts to an integer exactly; an
    // inexact conversion would emit a different value than requested.
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                             APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    // Convert the materialized integer back to the floating-point type.
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg, /*Kill=*/true);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}
1058
/// createResultReg - Allocate a fresh virtual register of class RC to hold
/// an instruction result.
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}
1062
/// FastEmitInst_ - Emit the machine instruction MachineInstOpcode with no
/// operands and return the virtual register it defines.
unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}
1071
/// FastEmitInst_r - Emit MachineInstOpcode with one register operand.
/// Returns the virtual register holding the result, or 0 on failure.
unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    // The instruction has an explicit def: define ResultReg directly.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    // No explicit def: the result lands in the instruction's first
    // implicit def, so copy that into ResultReg afterwards.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, II.ImplicitDefs[0],
                                         RC, RC, DL);
    // A failed copy is reported to the caller as register 0.
    if (!InsertedCopy)
      ResultReg = 0;
  }

  return ResultReg;
}
1093
/// FastEmitInst_rr - Emit MachineInstOpcode with two register operands.
/// Returns the virtual register holding the result, or 0 on failure.
unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    // Explicit def: define ResultReg directly.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    // Result is in the first implicit def; copy it into ResultReg.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, II.ImplicitDefs[0],
                                         RC, RC, DL);
    // A failed copy is reported to the caller as register 0.
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}
1117
/// FastEmitInst_ri - Emit MachineInstOpcode with one register and one
/// immediate operand. Returns the result register, or 0 on failure.
unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    // Explicit def: define ResultReg directly.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    // Result is in the first implicit def; copy it into ResultReg.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, II.ImplicitDefs[0],
                                         RC, RC, DL);
    // A failed copy is reported to the caller as register 0.
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}
1141
/// FastEmitInst_rf - Emit MachineInstOpcode with one register and one
/// floating-point immediate operand. Returns the result register, or 0 on
/// failure.
unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    // Explicit def: define ResultReg directly.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    // Result is in the first implicit def; copy it into ResultReg.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, II.ImplicitDefs[0],
                                         RC, RC, DL);
    // A failed copy is reported to the caller as register 0.
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}
1165
/// FastEmitInst_rri - Emit MachineInstOpcode with two register operands and
/// one immediate. Returns the result register, or 0 on failure.
unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    // Explicit def: define ResultReg directly.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    // Result is in the first implicit def; copy it into ResultReg.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, II.ImplicitDefs[0],
                                         RC, RC, DL);
    // A failed copy is reported to the caller as register 0.
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}
1192
/// FastEmitInst_i - Emit MachineInstOpcode with a single immediate operand.
/// Returns the result register, or 0 on failure.
unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    // Explicit def: define ResultReg directly.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    // Result is in the first implicit def; copy it into ResultReg.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                                         ResultReg, II.ImplicitDefs[0],
                                         RC, RC, DL);
    // A failed copy is reported to the caller as register 0.
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}
1211
/// FastEmitInst_extractsubreg - Emit a COPY from subregister Idx of the
/// virtual register Op0 into a fresh register of the class for RetVT.
unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}
1223
/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  // ANDing with 1 clears every bit except bit zero, zero-extending the
  // i1 value within a register of type VT.
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
1229
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input.  We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB.  As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  // Remember the original size so we can roll back partial updates on
  // failure.
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          // Illegal type: undo any updates made so far and bail out.
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        // Couldn't get a register for the incoming value: roll back and bail.
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}
1301