FastISel.cpp revision 2586b8f9366aed5a1efa44d3f18d095511601642
//===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported.  It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time.  For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support.  In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated.  Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time.  Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators.  More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Start out as null, meaning no local-value instructions have
  // been emitted.
  LastLocalValue = 0;

  // Advance the last local value past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    LastLocalValue = I;
    ++I;
  }
}

bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
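  // For example, given
  //   %a = add i32 %x, %y
  //   %b = mul i32 %a, %z   ; %a's only use, in the same block
  // %a is trivially killed at %b; multiple uses, or a use in another block,
  // disqualify it.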
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}

unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
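  // For example, a register materialized for a Constant has no fixed
  // definition point, so a copy cached in one block would not be known to
  // dominate uses in other blocks; caching it only locally sidesteps that.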
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;

  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue()) {
      Reg = TargetMaterializeFloatZero(CF);
    } else {
      // Try to emit the constant directly.
      Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
    }

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
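      // For example, 2.0 converts exactly to the integer 2, so the float can
      // be rebuilt as a sint_to_fp of a cheap integer constant instead of a
      // more expensive materialization (e.g. a constant-pool load).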
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value.  It might be
/// possible to fix this by selecting blocks in reverse postorder.
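/// For example, if a use of a value is selected before the block that
/// defines it, the use gets a register first; when the definition is later
/// selected into a different register, RegFixups records the replacement.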
unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return Reg;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    FuncInfo.RegFixups[AssignedReg] = Reg;

    AssignedReg = Reg;
  }

  return AssignedReg;
}

std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
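  // For example, an i32 index on a 64-bit target is sign-extended to i64
  // before it enters the address arithmetic.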
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri".  At -O0,
  // we don't have anything that canonicalizes operand order.
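  // For example, "add i32 5, %x" is handled here by evaluating it as
  // "%x + 5", which is safe because the operation is commutative.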
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (Op1 == 0) return false;

      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)   // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (ResultReg == 0) return false;

    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  const Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        uint64_t Offs = TD.getTypeAllocSize(Ty) * CI->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        continue;
      }

      // N = N + Idx * ElementSize;
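      // For example, indexing an array of i32 scales the index by 4 before
      // adding it to the running address.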
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

bool FastISel::SelectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::INLINEASM))
      .addExternalSymbol(IA->getAsmString().c_str())
      .addImm(ExtraInfo);
    return true;
  }

  const Function *F = Call->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return true;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      if (Arg->hasByValAttr()) {
        // Byval arguments' frame index is recorded during argument lowering.
        // Use this info directly.
        Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
        if (Offset)
          Reg = TRI.getFrameRegister(*FuncInfo.MF);
      }
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(Call);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging.  Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << DI);
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)!=TargetLowering::Expand)
      break;

    assert(FuncInfo.MBB->isLandingPad() &&
           "Call to eh.exception not in landing pad!");
    unsigned Reg = TLI.getExceptionAddressRegister();
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EHSELECTION, VT) != TargetLowering::Expand)
      break;
    if (FuncInfo.MBB->isLandingPad())
      AddCatchInfo(*Call, &FuncInfo.MF->getMMI(), FuncInfo.MBB);
    else {
#ifndef NDEBUG
      FuncInfo.CatchInfoLost.insert(Call);
#endif
      // FIXME: Mark exception selector register as live in.  Hack for PR1508.
      unsigned Reg = TLI.getExceptionSelectorRegister();
      if (Reg) FuncInfo.MBB->addLiveIn(Reg);
    }

    unsigned Reg = TLI.getExceptionSelectorRegister();
    EVT SrcVT = TLI.getPointerTy();
    const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);

    bool ResultRegIsKill = hasTrivialKill(Call);

    // Cast the register to the type of the selector.
    if (SrcVT.bitsGT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                             ResultReg, ResultRegIsKill);
    else if (SrcVT.bitsLT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                             ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
    if (ResultReg == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;

    UpdateValueMap(Call, ResultReg);

    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(Call->getArgOperand(1));
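    // Fold llvm.objectsize to its "unknown size" result: -1 when the second
    // argument is false (maximum size requested) and 0 when it is true
    // (minimum size requested).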
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(Call->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (ResultReg == 0)
      return false;
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  }

  // An arbitrary call. Bail.
  return false;
}

bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand.  Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg,
                                  InputRegIsKill);
    if (!InputReg)
      return false;
    InputRegIsKill = true;
  }
  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
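  // For example, negating a double flips bit 63: 1.0 (0x3FF0000000000000)
  // becomes -1.0 (0xBFF0000000000000).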
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result.
  EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT))
    return false;

  const Value *Op0 = EVI->getOperand(0);
  const Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else
    ResultReg = FuncInfo.InitializeRegForValue(Op0);

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->idx_begin(),
                                        EVI->idx_end());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(),
                                     AggValueVTs[i]);

  UpdateValueMap(EVI, ResultReg);
  return true;
}

bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }
    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return SelectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}

FastISel::~FastISel() {}

unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper around FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
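/// For example, a MUL by the immediate 8 is first rewritten as a left shift
/// by 3; if no "ri" shift pattern exists for the target, the 3 is
/// materialized into a register and an "rr" shift is emitted instead.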
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
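  // For example, a shift of an i32 by 37 has undefined results in the IR,
  // so bail out rather than emit it.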
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    const IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
                                              VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
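/// For example, a register holding 0xFFFFFF01 yields 0x00000001 after the
/// AND with 1 emitted here.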
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input.  We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB.  As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}
1387