// ARMFastISel.cpp revision 9ed58dff86f09699946641ba87f6c4f04a3773c8
1//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the ARM-specific support for the FastISel class. Some
11// of the target-specific code is generated by tablegen in the file
12// ARMGenFastISel.inc, which is #included here.
13//
14//===----------------------------------------------------------------------===//
15
16#include "ARM.h"
17#include "ARMBaseInstrInfo.h"
18#include "ARMRegisterInfo.h"
19#include "ARMTargetMachine.h"
20#include "ARMSubtarget.h"
21#include "llvm/CallingConv.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/GlobalVariable.h"
24#include "llvm/Instructions.h"
25#include "llvm/IntrinsicInst.h"
26#include "llvm/CodeGen/Analysis.h"
27#include "llvm/CodeGen/FastISel.h"
28#include "llvm/CodeGen/FunctionLoweringInfo.h"
29#include "llvm/CodeGen/MachineInstrBuilder.h"
30#include "llvm/CodeGen/MachineModuleInfo.h"
31#include "llvm/CodeGen/MachineConstantPool.h"
32#include "llvm/CodeGen/MachineFrameInfo.h"
33#include "llvm/CodeGen/MachineRegisterInfo.h"
34#include "llvm/Support/CallSite.h"
35#include "llvm/Support/CommandLine.h"
36#include "llvm/Support/ErrorHandling.h"
37#include "llvm/Support/GetElementPtrTypeIterator.h"
38#include "llvm/Target/TargetData.h"
39#include "llvm/Target/TargetInstrInfo.h"
40#include "llvm/Target/TargetLowering.h"
41#include "llvm/Target/TargetMachine.h"
42#include "llvm/Target/TargetOptions.h"
43using namespace llvm;
44
// Hidden, off-by-default command-line flag ("-arm-fast-isel") gating the
// experimental ARM fast instruction selector implemented in this file.
static cl::opt<bool>
EnableARMFastISel("arm-fast-isel",
                  cl::desc("Turn on experimental ARM fast-isel support"),
                  cl::init(false), cl::Hidden);
49
namespace {

// Fast instruction selector for ARM / Thumb-2. Overrides the generic
// FastEmitInst_* hooks so that every emitted instruction gets its ARM
// predicate / optional-def operands appended (see AddOptionalDefs).
class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  const ARMFunctionInfo *AFI;

  // Convenience variable to avoid checking all the time.
  bool isThumb;

  public:
    // Caches the subtarget, instruction info, lowering info and the
    // per-function ARM info off the MachineFunction being selected.
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
    }

    // Code from FastISel.cpp.
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
    virtual bool ARMSelectLoad(const Instruction *I);
    virtual bool ARMSelectStore(const Instruction *I);
    virtual bool ARMSelectBranch(const Instruction *I);
    virtual bool ARMSelectCmp(const Instruction *I);

    // Utility routines.
  private:
    // Type legality checks; isLoadTypeLegal additionally accepts i8/i16.
    bool isTypeLegal(const Type *Ty, EVT &VT);
    bool isLoadTypeLegal(const Type *Ty, EVT &VT);
    // Low-level load/store emission given a base register and offset.
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, unsigned Reg, int Offset);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, unsigned Reg, int Offset);
    // Frame-index fast paths for loads/stores of static allocas.
    bool ARMLoadAlloca(const Instruction *I, EVT VT);
    bool ARMStoreAlloca(const Instruction *I, unsigned SrcReg, EVT VT);
    // Resolves an address value into a (register, offset) pair.
    bool ARMComputeRegOffset(const Value *Obj, unsigned &Reg, int &Offset);
    // Constant materialization helpers used by TargetMaterializeConstant.
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C);

    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
};

} // end anonymous namespace
135
136// #include "ARMGenCallingConv.inc"
137
138// DefinesOptionalPredicate - This is different from DefinesPredicate in that
139// we don't care about implicit defs here, just places we'll need to add a
140// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
141bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
142  const TargetInstrDesc &TID = MI->getDesc();
143  if (!TID.hasOptionalDef())
144    return false;
145
146  // Look to see if our OptionalDef is defining CPSR or CCR.
147  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
148    const MachineOperand &MO = MI->getOperand(i);
149    if (!MO.isReg() || !MO.isDef()) continue;
150    if (MO.getReg() == ARM::CPSR)
151      *CPSR = true;
152  }
153  return true;
154}
155
156// If the machine is predicable go ahead and add the predicate operands, if
157// it needs default CC operands add those.
158const MachineInstrBuilder &
159ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
160  MachineInstr *MI = &*MIB;
161
162  // Do we use a predicate?
163  if (TII.isPredicable(MI))
164    AddDefaultPred(MIB);
165
166  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
167  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
168  bool CPSR = false;
169  if (DefinesOptionalPredicate(MI, &CPSR)) {
170    if (CPSR)
171      AddDefaultT1CC(MIB);
172    else
173      AddDefaultCC(MIB);
174  }
175  return MIB;
176}
177
178unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
179                                    const TargetRegisterClass* RC) {
180  unsigned ResultReg = createResultReg(RC);
181  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
182
183  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
184  return ResultReg;
185}
186
187unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
188                                     const TargetRegisterClass *RC,
189                                     unsigned Op0, bool Op0IsKill) {
190  unsigned ResultReg = createResultReg(RC);
191  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
192
193  if (II.getNumDefs() >= 1)
194    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
195                   .addReg(Op0, Op0IsKill * RegState::Kill));
196  else {
197    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
198                   .addReg(Op0, Op0IsKill * RegState::Kill));
199    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
200                   TII.get(TargetOpcode::COPY), ResultReg)
201                   .addReg(II.ImplicitDefs[0]));
202  }
203  return ResultReg;
204}
205
206unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
207                                      const TargetRegisterClass *RC,
208                                      unsigned Op0, bool Op0IsKill,
209                                      unsigned Op1, bool Op1IsKill) {
210  unsigned ResultReg = createResultReg(RC);
211  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
212
213  if (II.getNumDefs() >= 1)
214    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
215                   .addReg(Op0, Op0IsKill * RegState::Kill)
216                   .addReg(Op1, Op1IsKill * RegState::Kill));
217  else {
218    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
219                   .addReg(Op0, Op0IsKill * RegState::Kill)
220                   .addReg(Op1, Op1IsKill * RegState::Kill));
221    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
222                           TII.get(TargetOpcode::COPY), ResultReg)
223                   .addReg(II.ImplicitDefs[0]));
224  }
225  return ResultReg;
226}
227
228unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
229                                      const TargetRegisterClass *RC,
230                                      unsigned Op0, bool Op0IsKill,
231                                      uint64_t Imm) {
232  unsigned ResultReg = createResultReg(RC);
233  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
234
235  if (II.getNumDefs() >= 1)
236    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
237                   .addReg(Op0, Op0IsKill * RegState::Kill)
238                   .addImm(Imm));
239  else {
240    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
241                   .addReg(Op0, Op0IsKill * RegState::Kill)
242                   .addImm(Imm));
243    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
244                           TII.get(TargetOpcode::COPY), ResultReg)
245                   .addReg(II.ImplicitDefs[0]));
246  }
247  return ResultReg;
248}
249
250unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
251                                      const TargetRegisterClass *RC,
252                                      unsigned Op0, bool Op0IsKill,
253                                      const ConstantFP *FPImm) {
254  unsigned ResultReg = createResultReg(RC);
255  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
256
257  if (II.getNumDefs() >= 1)
258    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
259                   .addReg(Op0, Op0IsKill * RegState::Kill)
260                   .addFPImm(FPImm));
261  else {
262    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
263                   .addReg(Op0, Op0IsKill * RegState::Kill)
264                   .addFPImm(FPImm));
265    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
266                           TII.get(TargetOpcode::COPY), ResultReg)
267                   .addReg(II.ImplicitDefs[0]));
268  }
269  return ResultReg;
270}
271
272unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
273                                       const TargetRegisterClass *RC,
274                                       unsigned Op0, bool Op0IsKill,
275                                       unsigned Op1, bool Op1IsKill,
276                                       uint64_t Imm) {
277  unsigned ResultReg = createResultReg(RC);
278  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
279
280  if (II.getNumDefs() >= 1)
281    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
282                   .addReg(Op0, Op0IsKill * RegState::Kill)
283                   .addReg(Op1, Op1IsKill * RegState::Kill)
284                   .addImm(Imm));
285  else {
286    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
287                   .addReg(Op0, Op0IsKill * RegState::Kill)
288                   .addReg(Op1, Op1IsKill * RegState::Kill)
289                   .addImm(Imm));
290    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
291                           TII.get(TargetOpcode::COPY), ResultReg)
292                   .addReg(II.ImplicitDefs[0]));
293  }
294  return ResultReg;
295}
296
297unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
298                                     const TargetRegisterClass *RC,
299                                     uint64_t Imm) {
300  unsigned ResultReg = createResultReg(RC);
301  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
302
303  if (II.getNumDefs() >= 1)
304    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
305                   .addImm(Imm));
306  else {
307    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
308                   .addImm(Imm));
309    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
310                           TII.get(TargetOpcode::COPY), ResultReg)
311                   .addReg(II.ImplicitDefs[0]));
312  }
313  return ResultReg;
314}
315
316unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
317                                                 unsigned Op0, bool Op0IsKill,
318                                                 uint32_t Idx) {
319  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
320  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
321         "Cannot yet extract from physregs");
322  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
323                         DL, TII.get(TargetOpcode::COPY), ResultReg)
324                 .addReg(Op0, getKillRegState(Op0IsKill), Idx));
325  return ResultReg;
326}
327
328// For double width floating point we need to materialize two constants
329// (the high and the low) into integer registers then use a move to get
330// the combined constant into an FP reg.
331unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
332  const APFloat Val = CFP->getValueAPF();
333  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64;
334
335  // This checks to see if we can use VFP3 instructions to materialize
336  // a constant, otherwise we have to go through the constant pool.
337  if (TLI.isFPImmLegal(Val, VT)) {
338    unsigned Opc = is64bit ? ARM::FCONSTD : ARM::FCONSTS;
339    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
340    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
341                            DestReg)
342                    .addFPImm(CFP));
343    return DestReg;
344  }
345
346  // No 64-bit at the moment.
347  if (is64bit) return 0;
348
349  // Load this from the constant pool.
350  unsigned DestReg = ARMMaterializeInt(cast<Constant>(CFP));
351
352  // If we have a floating point constant we expect it in a floating point
353  // register.
354  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
355  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
356                          TII.get(ARM::VMOVRS), MoveReg)
357                  .addReg(DestReg));
358  return MoveReg;
359}
360
361unsigned ARMFastISel::ARMMaterializeInt(const Constant *C) {
362  // MachineConstantPool wants an explicit alignment.
363  unsigned Align = TD.getPrefTypeAlignment(C->getType());
364  if (Align == 0) {
365    // TODO: Figure out if this is correct.
366    Align = TD.getTypeAllocSize(C->getType());
367  }
368  unsigned Idx = MCP.getConstantPoolIndex(C, Align);
369
370  unsigned DestReg = createResultReg(TLI.getRegClassFor(MVT::i32));
371  if (isThumb)
372    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
373                            TII.get(ARM::t2LDRpci))
374                    .addReg(DestReg).addConstantPoolIndex(Idx));
375  else
376    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
377                            TII.get(ARM::LDRcp))
378                            .addReg(DestReg).addConstantPoolIndex(Idx)
379                    .addReg(0).addImm(0));
380
381  return DestReg;
382}
383
384unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
385  EVT VT = TLI.getValueType(C->getType(), true);
386
387  // Only handle simple types.
388  if (!VT.isSimple()) return 0;
389
390  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
391    return ARMMaterializeFP(CFP, VT);
392  return ARMMaterializeInt(C);
393}
394
395bool ARMFastISel::isTypeLegal(const Type *Ty, EVT &VT) {
396  VT = TLI.getValueType(Ty, true);
397
398  // Only handle simple types.
399  if (VT == MVT::Other || !VT.isSimple()) return false;
400
401  // Handle all legal types, i.e. a register that will directly hold this
402  // value.
403  return TLI.isTypeLegal(VT);
404}
405
406bool ARMFastISel::isLoadTypeLegal(const Type *Ty, EVT &VT) {
407  if (isTypeLegal(Ty, VT)) return true;
408
409  // If this is a type than can be sign or zero-extended to a basic operation
410  // go ahead and accept it now.
411  if (VT == MVT::i8 || VT == MVT::i16)
412    return true;
413
414  return false;
415}
416
417// Computes the Reg+Offset to get to an object.
// Resolve the address value Obj into a base register (Reg) plus an integer
// Offset. Returns false when the address can't be handled by fast-isel.
bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Reg,
                                      int &Offset) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
      return false;

    Opcode = I->getOpcode();
    U = I;
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }
  // NOTE(review): U is computed above but never used below — presumably
  // kept for future address-folding (e.g. GEP) support; confirm.

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  // Only Alloca is special-cased so far; every other opcode falls through
  // to the generic getRegForValue path below.
  switch (Opcode) {
    default:
    //errs() << "Failing Opcode is: " << *Op1 << "\n";
    break;
    case Instruction::Alloca: {
      assert(false && "Alloca should have been handled earlier!");
      return false;
    }
  }

  // Globals are not handled yet; reject them explicitly.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    //errs() << "Failing GV is: " << GV << "\n";
    (void)GV;
    return false;
  }

  // Try to get this in a register if nothing else has worked.
  Reg = getRegForValue(Obj);
  if (Reg == 0) return false;

  // Since the offset may be too large for the load instruction
  // get the reg+offset into a register.
  // TODO: Verify the additions work, otherwise we'll need to add the
  // offset instead of 0 to the instructions and do all sorts of operand
  // munging.
  // TODO: Optimize this somewhat.
  // NOTE(review): nothing in this function assigns Offset, and both callers
  // pass it in as 0, so this block appears to be dead until offset
  // computation is implemented — confirm.
  if (Offset != 0) {
    ARMCC::CondCodes Pred = ARMCC::AL;
    unsigned PredReg = 0;

    if (!isThumb)
      emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              Reg, Reg, Offset, Pred, PredReg,
                              static_cast<const ARMBaseInstrInfo&>(TII));
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             Reg, Reg, Offset, Pred, PredReg,
                             static_cast<const ARMBaseInstrInfo&>(TII));
    }
  }

  return true;
}
487
488bool ARMFastISel::ARMLoadAlloca(const Instruction *I, EVT VT) {
489  Value *Op0 = I->getOperand(0);
490
491  // Verify it's an alloca.
492  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op0)) {
493    DenseMap<const AllocaInst*, int>::iterator SI =
494      FuncInfo.StaticAllocaMap.find(AI);
495
496    if (SI != FuncInfo.StaticAllocaMap.end()) {
497      TargetRegisterClass* RC = TLI.getRegClassFor(VT);
498      unsigned ResultReg = createResultReg(RC);
499      TII.loadRegFromStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
500                               ResultReg, SI->second, RC,
501                               TM.getRegisterInfo());
502      UpdateValueMap(I, ResultReg);
503      return true;
504    }
505  }
506  return false;
507}
508
509bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
510                              unsigned Reg, int Offset) {
511
512  assert(VT.isSimple() && "Non-simple types are invalid here!");
513  unsigned Opc;
514
515  switch (VT.getSimpleVT().SimpleTy) {
516    default:
517      assert(false && "Trying to emit for an unhandled type!");
518      return false;
519    case MVT::i16:
520      Opc = isThumb ? ARM::tLDRH : ARM::LDRH;
521      VT = MVT::i32;
522      break;
523    case MVT::i8:
524      Opc = isThumb ? ARM::tLDRB : ARM::LDRB;
525      VT = MVT::i32;
526      break;
527    case MVT::i32:
528      Opc = isThumb ? ARM::tLDR : ARM::LDR;
529      break;
530  }
531
532  ResultReg = createResultReg(TLI.getRegClassFor(VT));
533
534  // TODO: Fix the Addressing modes so that these can share some code.
535  // Since this is a Thumb1 load this will work in Thumb1 or 2 mode.
536  if (isThumb)
537    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
538                            TII.get(Opc), ResultReg)
539                    .addReg(Reg).addImm(Offset).addReg(0));
540  else
541    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
542                            TII.get(Opc), ResultReg)
543                    .addReg(Reg).addReg(0).addImm(Offset));
544
545  return true;
546}
547
548bool ARMFastISel::ARMStoreAlloca(const Instruction *I, unsigned SrcReg, EVT VT){
549  Value *Op1 = I->getOperand(1);
550
551  // Verify it's an alloca.
552  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
553    DenseMap<const AllocaInst*, int>::iterator SI =
554      FuncInfo.StaticAllocaMap.find(AI);
555
556    if (SI != FuncInfo.StaticAllocaMap.end()) {
557      TargetRegisterClass* RC = TLI.getRegClassFor(VT);
558      assert(SrcReg != 0 && "Nothing to store!");
559      TII.storeRegToStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
560                              SrcReg, true /*isKill*/, SI->second, RC,
561                              TM.getRegisterInfo());
562      return true;
563    }
564  }
565  return false;
566}
567
568bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
569                               unsigned DstReg, int Offset) {
570  unsigned StrOpc;
571  switch (VT.getSimpleVT().SimpleTy) {
572    default: return false;
573    case MVT::i1:
574    case MVT::i8: StrOpc = isThumb ? ARM::tSTRB : ARM::STRB; break;
575    case MVT::i16: StrOpc = isThumb ? ARM::tSTRH : ARM::STRH; break;
576    case MVT::i32: StrOpc = isThumb ? ARM::tSTR : ARM::STR; break;
577    case MVT::f32:
578      if (!Subtarget->hasVFP2()) return false;
579      StrOpc = ARM::VSTRS;
580      break;
581    case MVT::f64:
582      if (!Subtarget->hasVFP2()) return false;
583      StrOpc = ARM::VSTRD;
584      break;
585  }
586
587  if (isThumb)
588    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
589                            TII.get(StrOpc), SrcReg)
590                    .addReg(DstReg).addImm(Offset).addReg(0));
591  else
592    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
593                            TII.get(StrOpc), SrcReg)
594                    .addReg(DstReg).addReg(0).addImm(Offset));
595
596  return true;
597}
598
599bool ARMFastISel::ARMSelectStore(const Instruction *I) {
600  Value *Op0 = I->getOperand(0);
601  unsigned SrcReg = 0;
602
603  // Yay type legalization
604  EVT VT;
605  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
606    return false;
607
608  // Get the value to be stored into a register.
609  SrcReg = getRegForValue(Op0);
610  if (SrcReg == 0)
611    return false;
612
613  // If we're an alloca we know we have a frame index and can emit the store
614  // quickly.
615  if (ARMStoreAlloca(I, SrcReg, VT))
616    return true;
617
618  // Our register and offset with innocuous defaults.
619  unsigned Reg = 0;
620  int Offset = 0;
621
622  // See if we can handle this as Reg + Offset
623  if (!ARMComputeRegOffset(I->getOperand(1), Reg, Offset))
624    return false;
625
626  if (!ARMEmitStore(VT, SrcReg, Reg, Offset /* 0 */)) return false;
627
628  return false;
629
630}
631
632bool ARMFastISel::ARMSelectLoad(const Instruction *I) {
633  // Verify we have a legal type before going any further.
634  EVT VT;
635  if (!isLoadTypeLegal(I->getType(), VT))
636    return false;
637
638  // If we're an alloca we know we have a frame index and can emit the load
639  // directly in short order.
640  if (ARMLoadAlloca(I, VT))
641    return true;
642
643  // Our register and offset with innocuous defaults.
644  unsigned Reg = 0;
645  int Offset = 0;
646
647  // See if we can handle this as Reg + Offset
648  if (!ARMComputeRegOffset(I->getOperand(0), Reg, Offset))
649    return false;
650
651  unsigned ResultReg;
652  if (!ARMEmitLoad(VT, ResultReg, Reg, Offset /* 0 */)) return false;
653
654  UpdateValueMap(I, ResultReg);
655  return true;
656}
657
// Select a conditional IR branch: compare the condition register, emit a
// conditional branch to the true block, and fall through to the false block.
bool ARMFastISel::ARMSelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  // Machine blocks for the taken (TBB) and not-taken (FBB) IR successors.
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.
  unsigned CondReg = getRegForValue(BI->getCondition());
  if (CondReg == 0) return false;

  unsigned CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  // NOTE(review): this compares CondReg against itself, which always yields
  // an equal result, so the NE-conditional branch below would never be
  // taken — confirm whether a compare against zero was intended here.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addReg(CondReg));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
                  .addMBB(TBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
  // Emit the branch for the false edge, then record the taken successor.
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}
677
678bool ARMFastISel::ARMSelectCmp(const Instruction *I) {
679  const CmpInst *CI = cast<CmpInst>(I);
680
681  EVT VT;
682  const Type *Ty = CI->getOperand(0)->getType();
683  if (!isTypeLegal(Ty, VT))
684    return false;
685
686  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
687  if (isFloat && !Subtarget->hasVFP2())
688    return false;
689
690  unsigned CmpOpc;
691  switch (VT.getSimpleVT().SimpleTy) {
692    default: return false;
693    // TODO: Verify compares.
694    case MVT::f32:
695      CmpOpc = ARM::VCMPES;
696      break;
697    case MVT::f64:
698      CmpOpc = ARM::VCMPED;
699      break;
700    case MVT::i32:
701      CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
702      break;
703  }
704
705  unsigned Arg1 = getRegForValue(CI->getOperand(0));
706  if (Arg1 == 0) return false;
707
708  unsigned Arg2 = getRegForValue(CI->getOperand(1));
709  if (Arg2 == 0) return false;
710
711  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
712                  .addReg(Arg1).addReg(Arg2));
713
714  // For floating point we need to move the result to a register we can
715  // actually do something with.
716  if (isFloat)
717    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
718                            TII.get(ARM::FMSTAT)));
719  return true;
720}
721
722// TODO: SoftFP support.
723bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
724  // No Thumb-1 for now.
725  if (isThumb && !AFI->isThumb2Function()) return false;
726
727  switch (I->getOpcode()) {
728    case Instruction::Load:
729      return ARMSelectLoad(I);
730    case Instruction::Store:
731      return ARMSelectStore(I);
732    case Instruction::Br:
733      return ARMSelectBranch(I);
734    case Instruction::ICmp:
735    case Instruction::FCmp:
736        return ARMSelectCmp(I);
737    default: break;
738  }
739  return false;
740}
741
742namespace llvm {
743  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
744    if (EnableARMFastISel) return new ARMFastISel(funcInfo);
745    return 0;
746  }
747}
748