// ARMFastISel.cpp revision 30b663339e4e76981a2fc4dee84959298c0a1dc8
1//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the ARM-specific support for the FastISel class. Some
11// of the target-specific code is generated by tablegen in the file
12// ARMGenFastISel.inc, which is #included here.
13//
14//===----------------------------------------------------------------------===//
15
16#include "ARM.h"
17#include "ARMBaseInstrInfo.h"
18#include "ARMRegisterInfo.h"
19#include "ARMTargetMachine.h"
20#include "ARMSubtarget.h"
21#include "llvm/CallingConv.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/GlobalVariable.h"
24#include "llvm/Instructions.h"
25#include "llvm/IntrinsicInst.h"
26#include "llvm/CodeGen/Analysis.h"
27#include "llvm/CodeGen/FastISel.h"
28#include "llvm/CodeGen/FunctionLoweringInfo.h"
29#include "llvm/CodeGen/MachineInstrBuilder.h"
30#include "llvm/CodeGen/MachineModuleInfo.h"
31#include "llvm/CodeGen/MachineConstantPool.h"
32#include "llvm/CodeGen/MachineFrameInfo.h"
33#include "llvm/CodeGen/MachineRegisterInfo.h"
34#include "llvm/Support/CallSite.h"
35#include "llvm/Support/CommandLine.h"
36#include "llvm/Support/ErrorHandling.h"
37#include "llvm/Support/GetElementPtrTypeIterator.h"
38#include "llvm/Target/TargetData.h"
39#include "llvm/Target/TargetInstrInfo.h"
40#include "llvm/Target/TargetLowering.h"
41#include "llvm/Target/TargetMachine.h"
42#include "llvm/Target/TargetOptions.h"
43using namespace llvm;
44
// Command-line gate for the experimental ARM fast instruction selector.
// Off by default; pass -arm-fast-isel to try it. Hidden because it is a
// developer option, not a supported user-facing flag.
static cl::opt<bool>
EnableARMFastISel("arm-fast-isel",
                  cl::desc("Turn on experimental ARM fast-isel support"),
                  cl::init(false), cl::Hidden);
49
namespace {

/// ARMFastISel - ARM-specific FastISel implementation. Selects a small set
/// of instructions (currently loads, stores, and branches) directly into
/// MachineInstrs; anything it cannot handle falls back to the normal
/// SelectionDAG path via TargetSelectInstruction returning false.
class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;     // Owning target machine.
  const TargetInstrInfo &TII;  // Cached from TM for convenience.
  const TargetLowering &TLI;   // Cached from TM for convenience.
  const ARMFunctionInfo *AFI;  // Per-function ARM state (Thumb-ness, etc.).

  // Convenience variable to avoid checking all the time.
  bool isThumb;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
    }

    // Code from FastISel.cpp. These overrides add the ARM-required
    // predicate/CC operands (via AddOptionalDefs) to each emitted instruction.
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
    virtual bool ARMSelectLoad(const Instruction *I);
    virtual bool ARMSelectStore(const Instruction *I);
    virtual bool ARMSelectBranch(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(const Type *Ty, EVT &VT);
    bool isLoadTypeLegal(const Type *Ty, EVT &VT);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, unsigned Reg, int Offset);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, unsigned Reg, int Offset);
    bool ARMLoadAlloca(const Instruction *I, EVT VT);
    bool ARMStoreAlloca(const Instruction *I, unsigned SrcReg, EVT VT);
    bool ARMComputeRegOffset(const Value *Obj, unsigned &Reg, int &Offset);
    bool ARMMaterializeConstant(const ConstantInt *Val, unsigned &Reg);

    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
};

} // end anonymous namespace
133
134// #include "ARMGenCallingConv.inc"
135
136// DefinesOptionalPredicate - This is different from DefinesPredicate in that
137// we don't care about implicit defs here, just places we'll need to add a
138// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
139bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
140  const TargetInstrDesc &TID = MI->getDesc();
141  if (!TID.hasOptionalDef())
142    return false;
143
144  // Look to see if our OptionalDef is defining CPSR or CCR.
145  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
146    const MachineOperand &MO = MI->getOperand(i);
147    if (!MO.isReg() || !MO.isDef()) continue;
148    if (MO.getReg() == ARM::CPSR)
149      *CPSR = true;
150  }
151  return true;
152}
153
154// If the machine is predicable go ahead and add the predicate operands, if
155// it needs default CC operands add those.
156const MachineInstrBuilder &
157ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
158  MachineInstr *MI = &*MIB;
159
160  // Do we use a predicate?
161  if (TII.isPredicable(MI))
162    AddDefaultPred(MIB);
163
164  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
165  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
166  bool CPSR = false;
167  if (DefinesOptionalPredicate(MI, &CPSR)) {
168    if (CPSR)
169      AddDefaultT1CC(MIB);
170    else
171      AddDefaultCC(MIB);
172  }
173  return MIB;
174}
175
176unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
177                                    const TargetRegisterClass* RC) {
178  unsigned ResultReg = createResultReg(RC);
179  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
180
181  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
182  return ResultReg;
183}
184
185unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
186                                     const TargetRegisterClass *RC,
187                                     unsigned Op0, bool Op0IsKill) {
188  unsigned ResultReg = createResultReg(RC);
189  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
190
191  if (II.getNumDefs() >= 1)
192    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
193                   .addReg(Op0, Op0IsKill * RegState::Kill));
194  else {
195    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
196                   .addReg(Op0, Op0IsKill * RegState::Kill));
197    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
198                   TII.get(TargetOpcode::COPY), ResultReg)
199                   .addReg(II.ImplicitDefs[0]));
200  }
201  return ResultReg;
202}
203
204unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
205                                      const TargetRegisterClass *RC,
206                                      unsigned Op0, bool Op0IsKill,
207                                      unsigned Op1, bool Op1IsKill) {
208  unsigned ResultReg = createResultReg(RC);
209  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
210
211  if (II.getNumDefs() >= 1)
212    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
213                   .addReg(Op0, Op0IsKill * RegState::Kill)
214                   .addReg(Op1, Op1IsKill * RegState::Kill));
215  else {
216    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
217                   .addReg(Op0, Op0IsKill * RegState::Kill)
218                   .addReg(Op1, Op1IsKill * RegState::Kill));
219    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
220                           TII.get(TargetOpcode::COPY), ResultReg)
221                   .addReg(II.ImplicitDefs[0]));
222  }
223  return ResultReg;
224}
225
226unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
227                                      const TargetRegisterClass *RC,
228                                      unsigned Op0, bool Op0IsKill,
229                                      uint64_t Imm) {
230  unsigned ResultReg = createResultReg(RC);
231  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
232
233  if (II.getNumDefs() >= 1)
234    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
235                   .addReg(Op0, Op0IsKill * RegState::Kill)
236                   .addImm(Imm));
237  else {
238    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
239                   .addReg(Op0, Op0IsKill * RegState::Kill)
240                   .addImm(Imm));
241    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
242                           TII.get(TargetOpcode::COPY), ResultReg)
243                   .addReg(II.ImplicitDefs[0]));
244  }
245  return ResultReg;
246}
247
248unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
249                                      const TargetRegisterClass *RC,
250                                      unsigned Op0, bool Op0IsKill,
251                                      const ConstantFP *FPImm) {
252  unsigned ResultReg = createResultReg(RC);
253  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
254
255  if (II.getNumDefs() >= 1)
256    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
257                   .addReg(Op0, Op0IsKill * RegState::Kill)
258                   .addFPImm(FPImm));
259  else {
260    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
261                   .addReg(Op0, Op0IsKill * RegState::Kill)
262                   .addFPImm(FPImm));
263    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
264                           TII.get(TargetOpcode::COPY), ResultReg)
265                   .addReg(II.ImplicitDefs[0]));
266  }
267  return ResultReg;
268}
269
270unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
271                                       const TargetRegisterClass *RC,
272                                       unsigned Op0, bool Op0IsKill,
273                                       unsigned Op1, bool Op1IsKill,
274                                       uint64_t Imm) {
275  unsigned ResultReg = createResultReg(RC);
276  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
277
278  if (II.getNumDefs() >= 1)
279    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
280                   .addReg(Op0, Op0IsKill * RegState::Kill)
281                   .addReg(Op1, Op1IsKill * RegState::Kill)
282                   .addImm(Imm));
283  else {
284    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
285                   .addReg(Op0, Op0IsKill * RegState::Kill)
286                   .addReg(Op1, Op1IsKill * RegState::Kill)
287                   .addImm(Imm));
288    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
289                           TII.get(TargetOpcode::COPY), ResultReg)
290                   .addReg(II.ImplicitDefs[0]));
291  }
292  return ResultReg;
293}
294
295unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
296                                     const TargetRegisterClass *RC,
297                                     uint64_t Imm) {
298  unsigned ResultReg = createResultReg(RC);
299  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
300
301  if (II.getNumDefs() >= 1)
302    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
303                   .addImm(Imm));
304  else {
305    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
306                   .addImm(Imm));
307    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
308                           TII.get(TargetOpcode::COPY), ResultReg)
309                   .addReg(II.ImplicitDefs[0]));
310  }
311  return ResultReg;
312}
313
314unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
315                                                 unsigned Op0, bool Op0IsKill,
316                                                 uint32_t Idx) {
317  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
318  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
319         "Cannot yet extract from physregs");
320  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
321                         DL, TII.get(TargetOpcode::COPY), ResultReg)
322                 .addReg(Op0, getKillRegState(Op0IsKill), Idx));
323  return ResultReg;
324}
325
326unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
327  EVT VT = TLI.getValueType(C->getType(), true);
328
329  // Only handle simple types.
330  if (!VT.isSimple()) return 0;
331
332  // Handle double width floating point?
333  if (VT.getSimpleVT().SimpleTy == MVT::f64) return 0;
334
335  // TODO: Theoretically we could materialize fp constants directly with
336  // instructions from VFP3.
337
338  // MachineConstantPool wants an explicit alignment.
339  unsigned Align = TD.getPrefTypeAlignment(C->getType());
340  if (Align == 0) {
341    // TODO: Figure out if this is correct.
342    Align = TD.getTypeAllocSize(C->getType());
343  }
344  unsigned Idx = MCP.getConstantPoolIndex(C, Align);
345
346  unsigned DestReg = createResultReg(TLI.getRegClassFor(MVT::i32));
347  if (isThumb)
348    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
349                            TII.get(ARM::t2LDRpci))
350                    .addReg(DestReg).addConstantPoolIndex(Idx));
351  else
352    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
353                            TII.get(ARM::LDRcp))
354                            .addReg(DestReg).addConstantPoolIndex(Idx)
355                    .addReg(0).addImm(0));
356
357  // If we have a floating point constant we expect it in a floating point
358  // register.
359  // TODO: Make this use ARMBaseInstrInfo::copyPhysReg.
360  if (C->getType()->isFloatTy()) {
361    unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
362    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
363                            TII.get(ARM::VMOVRS), MoveReg)
364                    .addReg(DestReg));
365    return MoveReg;
366  }
367
368  return DestReg;
369}
370
371bool ARMFastISel::isTypeLegal(const Type *Ty, EVT &VT) {
372  VT = TLI.getValueType(Ty, true);
373
374  // Only handle simple types.
375  if (VT == MVT::Other || !VT.isSimple()) return false;
376
377  // Handle all legal types, i.e. a register that will directly hold this
378  // value.
379  return TLI.isTypeLegal(VT);
380}
381
382bool ARMFastISel::isLoadTypeLegal(const Type *Ty, EVT &VT) {
383  if (isTypeLegal(Ty, VT)) return true;
384
385  // If this is a type than can be sign or zero-extended to a basic operation
386  // go ahead and accept it now.
387  if (VT == MVT::i8 || VT == MVT::i16)
388    return true;
389
390  return false;
391}
392
// Computes the Reg+Offset to get to an object. Returns true on success,
// with Reg holding a register that (after any reg+imm materialization
// below) addresses Obj. NOTE: Offset is an in/out parameter that this
// function currently never *computes* — it only folds a caller-supplied
// nonzero offset into Reg (see the TODOs below).
bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Reg,
                                      int &Offset) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
      return false;

    Opcode = I->getOpcode();
    U = I;
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  // Address spaces above 255 have target-specific semantics we don't model.
  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  // Opcode dispatch: only Alloca is special-cased so far (and callers are
  // expected to have peeled allocas off already); everything else falls
  // through to the generic getRegForValue path below.
  switch (Opcode) {
    default:
    //errs() << "Failing Opcode is: " << *Op1 << "\n";
    break;
    case Instruction::Alloca: {
      assert(false && "Alloca should have been handled earlier!");
      return false;
    }
  }

  // Globals need constant-pool / GOT handling that isn't implemented yet.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    //errs() << "Failing GV is: " << GV << "\n";
    (void)GV;
    return false;
  }

  // Try to get this in a register if nothing else has worked.
  Reg = getRegForValue(Obj);
  if (Reg == 0) return false;

  // Since the offset may be too large for the load instruction
  // get the reg+offset into a register.
  // TODO: Verify the additions work, otherwise we'll need to add the
  // offset instead of 0 to the instructions and do all sorts of operand
  // munging.
  // TODO: Optimize this somewhat.
  if (Offset != 0) {
    ARMCC::CondCodes Pred = ARMCC::AL;
    unsigned PredReg = 0;

    // Fold the offset into Reg in place (Reg = Reg + Offset), using the
    // ARM or Thumb2 immediate-add expansion as appropriate.
    if (!isThumb)
      emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              Reg, Reg, Offset, Pred, PredReg,
                              static_cast<const ARMBaseInstrInfo&>(TII));
    else {
      // Thumb1 functions are rejected in TargetSelectInstruction, so any
      // Thumb function reaching here must be Thumb2.
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             Reg, Reg, Offset, Pred, PredReg,
                             static_cast<const ARMBaseInstrInfo&>(TII));
    }
  }

  return true;
}
463
464bool ARMFastISel::ARMLoadAlloca(const Instruction *I, EVT VT) {
465  Value *Op0 = I->getOperand(0);
466
467  // Verify it's an alloca.
468  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op0)) {
469    DenseMap<const AllocaInst*, int>::iterator SI =
470      FuncInfo.StaticAllocaMap.find(AI);
471
472    if (SI != FuncInfo.StaticAllocaMap.end()) {
473      TargetRegisterClass* RC = TLI.getRegClassFor(VT);
474      unsigned ResultReg = createResultReg(RC);
475      TII.loadRegFromStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
476                               ResultReg, SI->second, RC,
477                               TM.getRegisterInfo());
478      UpdateValueMap(I, ResultReg);
479      return true;
480    }
481  }
482  return false;
483}
484
485bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
486                              unsigned Reg, int Offset) {
487
488  assert(VT.isSimple() && "Non-simple types are invalid here!");
489  unsigned Opc;
490
491  switch (VT.getSimpleVT().SimpleTy) {
492    default:
493      assert(false && "Trying to emit for an unhandled type!");
494      return false;
495    case MVT::i16:
496      Opc = isThumb ? ARM::tLDRH : ARM::LDRH;
497      VT = MVT::i32;
498      break;
499    case MVT::i8:
500      Opc = isThumb ? ARM::tLDRB : ARM::LDRB;
501      VT = MVT::i32;
502      break;
503    case MVT::i32:
504      Opc = isThumb ? ARM::tLDR : ARM::LDR;
505      break;
506  }
507
508  ResultReg = createResultReg(TLI.getRegClassFor(VT));
509
510  // TODO: Fix the Addressing modes so that these can share some code.
511  // Since this is a Thumb1 load this will work in Thumb1 or 2 mode.
512  if (isThumb)
513    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
514                            TII.get(Opc), ResultReg)
515                    .addReg(Reg).addImm(Offset).addReg(0));
516  else
517    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
518                            TII.get(Opc), ResultReg)
519                    .addReg(Reg).addReg(0).addImm(Offset));
520
521  return true;
522}
523
524bool ARMFastISel::ARMStoreAlloca(const Instruction *I, unsigned SrcReg, EVT VT){
525  Value *Op1 = I->getOperand(1);
526
527  // Verify it's an alloca.
528  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
529    DenseMap<const AllocaInst*, int>::iterator SI =
530      FuncInfo.StaticAllocaMap.find(AI);
531
532    if (SI != FuncInfo.StaticAllocaMap.end()) {
533      TargetRegisterClass* RC = TLI.getRegClassFor(VT);
534      assert(SrcReg != 0 && "Nothing to store!");
535      TII.storeRegToStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
536                              SrcReg, true /*isKill*/, SI->second, RC,
537                              TM.getRegisterInfo());
538      return true;
539    }
540  }
541  return false;
542}
543
544bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
545                               unsigned DstReg, int Offset) {
546  unsigned StrOpc;
547  switch (VT.getSimpleVT().SimpleTy) {
548    default: return false;
549    case MVT::i1:
550    case MVT::i8: StrOpc = isThumb ? ARM::tSTRB : ARM::STRB; break;
551    case MVT::i16: StrOpc = isThumb ? ARM::tSTRH : ARM::STRH; break;
552    case MVT::i32: StrOpc = isThumb ? ARM::tSTR : ARM::STR; break;
553    case MVT::f32:
554      if (!Subtarget->hasVFP2()) return false;
555      StrOpc = ARM::VSTRS;
556      break;
557    case MVT::f64:
558      if (!Subtarget->hasVFP2()) return false;
559      StrOpc = ARM::VSTRD;
560      break;
561  }
562
563  if (isThumb)
564    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
565                            TII.get(StrOpc), SrcReg)
566                    .addReg(DstReg).addImm(Offset).addReg(0));
567  else
568    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
569                            TII.get(StrOpc), SrcReg)
570                    .addReg(DstReg).addReg(0).addImm(Offset));
571
572  return true;
573}
574
575bool ARMFastISel::ARMSelectStore(const Instruction *I) {
576  Value *Op0 = I->getOperand(0);
577  unsigned SrcReg = 0;
578
579  // Yay type legalization
580  EVT VT;
581  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
582    return false;
583
584  // Get the value to be stored into a register.
585  SrcReg = getRegForValue(Op0);
586  if (SrcReg == 0)
587    return false;
588
589  // If we're an alloca we know we have a frame index and can emit the store
590  // quickly.
591  if (ARMStoreAlloca(I, SrcReg, VT))
592    return true;
593
594  // Our register and offset with innocuous defaults.
595  unsigned Reg = 0;
596  int Offset = 0;
597
598  // See if we can handle this as Reg + Offset
599  if (!ARMComputeRegOffset(I->getOperand(1), Reg, Offset))
600    return false;
601
602  if (!ARMEmitStore(VT, SrcReg, Reg, Offset /* 0 */)) return false;
603
604  return false;
605
606}
607
608bool ARMFastISel::ARMSelectLoad(const Instruction *I) {
609  // Verify we have a legal type before going any further.
610  EVT VT;
611  if (!isLoadTypeLegal(I->getType(), VT))
612    return false;
613
614  // If we're an alloca we know we have a frame index and can emit the load
615  // directly in short order.
616  if (ARMLoadAlloca(I, VT))
617    return true;
618
619  // Our register and offset with innocuous defaults.
620  unsigned Reg = 0;
621  int Offset = 0;
622
623  // See if we can handle this as Reg + Offset
624  if (!ARMComputeRegOffset(I->getOperand(0), Reg, Offset))
625    return false;
626
627  unsigned ResultReg;
628  if (!ARMEmitLoad(VT, ResultReg, Reg, Offset /* 0 */)) return false;
629
630  UpdateValueMap(I, ResultReg);
631  return true;
632}
633
634bool ARMFastISel::ARMSelectBranch(const Instruction *I) {
635  const BranchInst *BI = cast<BranchInst>(I);
636  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
637  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
638
639  // Simple branch support.
640  unsigned CondReg = getRegForValue(BI->getCondition());
641  if (CondReg == 0) return false;
642
643  unsigned CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
644  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
645  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
646                  .addReg(CondReg).addReg(CondReg));
647  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
648                  .addMBB(TBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
649  FastEmitBranch(FBB, DL);
650  FuncInfo.MBB->addSuccessor(TBB);
651  return true;
652}
653
654// TODO: SoftFP support.
655bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
656  // No Thumb-1 for now.
657  if (isThumb && !AFI->isThumb2Function()) return false;
658
659  switch (I->getOpcode()) {
660    case Instruction::Load:
661      return ARMSelectLoad(I);
662    case Instruction::Store:
663      return ARMSelectStore(I);
664    case Instruction::Br:
665      return ARMSelectBranch(I);
666    default: break;
667  }
668  return false;
669}
670
671namespace llvm {
672  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
673    if (EnableARMFastISel) return new ARMFastISel(funcInfo);
674    return 0;
675  }
676}
677