ARMFastISel.cpp revision 318b6eec8d72ad6dad887abde3fed484bd8d86ef
1//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the ARM-specific support for the FastISel class. Some
11// of the target-specific code is generated by tablegen in the file
12// ARMGenFastISel.inc, which is #included here.
13//
14//===----------------------------------------------------------------------===//
15
16#include "ARM.h"
17#include "ARMBaseInstrInfo.h"
18#include "ARMRegisterInfo.h"
19#include "ARMTargetMachine.h"
20#include "ARMSubtarget.h"
21#include "llvm/CallingConv.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/GlobalVariable.h"
24#include "llvm/Instructions.h"
25#include "llvm/IntrinsicInst.h"
26#include "llvm/CodeGen/Analysis.h"
27#include "llvm/CodeGen/FastISel.h"
28#include "llvm/CodeGen/FunctionLoweringInfo.h"
29#include "llvm/CodeGen/MachineInstrBuilder.h"
30#include "llvm/CodeGen/MachineModuleInfo.h"
31#include "llvm/CodeGen/MachineConstantPool.h"
32#include "llvm/CodeGen/MachineFrameInfo.h"
33#include "llvm/CodeGen/MachineRegisterInfo.h"
34#include "llvm/Support/CallSite.h"
35#include "llvm/Support/CommandLine.h"
36#include "llvm/Support/ErrorHandling.h"
37#include "llvm/Support/GetElementPtrTypeIterator.h"
38#include "llvm/Target/TargetData.h"
39#include "llvm/Target/TargetInstrInfo.h"
40#include "llvm/Target/TargetLowering.h"
41#include "llvm/Target/TargetMachine.h"
42#include "llvm/Target/TargetOptions.h"
43using namespace llvm;
44
// Command-line gate for the experimental ARM fast instruction selector.
// Off by default and hidden; checked in ARM::createFastISel.
static cl::opt<bool>
EnableARMFastISel("arm-fast-isel",
                  cl::desc("Turn on experimental ARM fast-isel support"),
                  cl::init(false), cl::Hidden);
49
namespace {

// ARMFastISel - Experimental ARM implementation of the FastISel interface.
// Currently it only selects simple loads and stores (see
// TargetSelectInstruction); everything else is rejected so that the caller
// can fall back to the normal instruction-selection path.
class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  // Cached references to the machine/instr/lowering info of the function
  // being compiled, plus the per-function ARM state (Thumb mode queries).
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  const ARMFunctionInfo *AFI;

  public:
    // Caches target, instruction, and lowering info from the function's
    // target machine.
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
    }

    // Code from FastISel.cpp.
    // ARM overrides of the generic emitters; each one additionally appends
    // default predicate/CC operands via AddOptionalDefs.
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
    virtual bool ARMSelectLoad(const Instruction *I);
    virtual bool ARMSelectStore(const Instruction *I);

    // Utility routines.
  private:
    // Type legality checks; the load variant additionally accepts i8/i16,
    // which the load emitters widen to i32.
    bool isTypeLegal(const Type *Ty, EVT &VT);
    bool isLoadTypeLegal(const Type *Ty, EVT &VT);
    // Emit a load/store of VT at [Reg + Offset].
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, unsigned Reg, int Offset);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, unsigned Reg, int Offset);
    // Frame-index fast paths for loads/stores of static allocas.
    bool ARMLoadAlloca(const Instruction *I);
    bool ARMStoreAlloca(const Instruction *I, unsigned SrcReg);
    // Fold an address value into a base register (plus offset).
    bool ARMComputeRegOffset(const Value *Obj, unsigned &Reg, int &Offset);
    // Materialize an integer constant into a fresh register.
    bool ARMMaterializeConstant(const ConstantInt *Val, unsigned &Reg);

    // Helpers for adding default predicate / condition-code operands.
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
};

} // end anonymous namespace
127
128// #include "ARMGenCallingConv.inc"
129
130// DefinesOptionalPredicate - This is different from DefinesPredicate in that
131// we don't care about implicit defs here, just places we'll need to add a
132// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
133bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
134  const TargetInstrDesc &TID = MI->getDesc();
135  if (!TID.hasOptionalDef())
136    return false;
137
138  // Look to see if our OptionalDef is defining CPSR or CCR.
139  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
140    const MachineOperand &MO = MI->getOperand(i);
141    if (!MO.isReg() || !MO.isDef()) continue;
142    if (MO.getReg() == ARM::CPSR)
143      *CPSR = true;
144  }
145  return true;
146}
147
148// If the machine is predicable go ahead and add the predicate operands, if
149// it needs default CC operands add those.
150const MachineInstrBuilder &
151ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
152  MachineInstr *MI = &*MIB;
153
154  // Do we use a predicate?
155  if (TII.isPredicable(MI))
156    AddDefaultPred(MIB);
157
158  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
159  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
160  bool CPSR = false;
161  if (DefinesOptionalPredicate(MI, &CPSR)) {
162    if (CPSR)
163      AddDefaultT1CC(MIB);
164    else
165      AddDefaultCC(MIB);
166  }
167  return MIB;
168}
169
170unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
171                                    const TargetRegisterClass* RC) {
172  unsigned ResultReg = createResultReg(RC);
173  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
174
175  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
176  return ResultReg;
177}
178
179unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
180                                     const TargetRegisterClass *RC,
181                                     unsigned Op0, bool Op0IsKill) {
182  unsigned ResultReg = createResultReg(RC);
183  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
184
185  if (II.getNumDefs() >= 1)
186    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
187                   .addReg(Op0, Op0IsKill * RegState::Kill));
188  else {
189    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
190                   .addReg(Op0, Op0IsKill * RegState::Kill));
191    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
192                   TII.get(TargetOpcode::COPY), ResultReg)
193                   .addReg(II.ImplicitDefs[0]));
194  }
195  return ResultReg;
196}
197
198unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
199                                      const TargetRegisterClass *RC,
200                                      unsigned Op0, bool Op0IsKill,
201                                      unsigned Op1, bool Op1IsKill) {
202  unsigned ResultReg = createResultReg(RC);
203  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
204
205  if (II.getNumDefs() >= 1)
206    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
207                   .addReg(Op0, Op0IsKill * RegState::Kill)
208                   .addReg(Op1, Op1IsKill * RegState::Kill));
209  else {
210    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
211                   .addReg(Op0, Op0IsKill * RegState::Kill)
212                   .addReg(Op1, Op1IsKill * RegState::Kill));
213    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
214                           TII.get(TargetOpcode::COPY), ResultReg)
215                   .addReg(II.ImplicitDefs[0]));
216  }
217  return ResultReg;
218}
219
220unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
221                                      const TargetRegisterClass *RC,
222                                      unsigned Op0, bool Op0IsKill,
223                                      uint64_t Imm) {
224  unsigned ResultReg = createResultReg(RC);
225  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
226
227  if (II.getNumDefs() >= 1)
228    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
229                   .addReg(Op0, Op0IsKill * RegState::Kill)
230                   .addImm(Imm));
231  else {
232    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
233                   .addReg(Op0, Op0IsKill * RegState::Kill)
234                   .addImm(Imm));
235    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
236                           TII.get(TargetOpcode::COPY), ResultReg)
237                   .addReg(II.ImplicitDefs[0]));
238  }
239  return ResultReg;
240}
241
242unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
243                                      const TargetRegisterClass *RC,
244                                      unsigned Op0, bool Op0IsKill,
245                                      const ConstantFP *FPImm) {
246  unsigned ResultReg = createResultReg(RC);
247  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
248
249  if (II.getNumDefs() >= 1)
250    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
251                   .addReg(Op0, Op0IsKill * RegState::Kill)
252                   .addFPImm(FPImm));
253  else {
254    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
255                   .addReg(Op0, Op0IsKill * RegState::Kill)
256                   .addFPImm(FPImm));
257    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
258                           TII.get(TargetOpcode::COPY), ResultReg)
259                   .addReg(II.ImplicitDefs[0]));
260  }
261  return ResultReg;
262}
263
264unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
265                                       const TargetRegisterClass *RC,
266                                       unsigned Op0, bool Op0IsKill,
267                                       unsigned Op1, bool Op1IsKill,
268                                       uint64_t Imm) {
269  unsigned ResultReg = createResultReg(RC);
270  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
271
272  if (II.getNumDefs() >= 1)
273    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
274                   .addReg(Op0, Op0IsKill * RegState::Kill)
275                   .addReg(Op1, Op1IsKill * RegState::Kill)
276                   .addImm(Imm));
277  else {
278    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
279                   .addReg(Op0, Op0IsKill * RegState::Kill)
280                   .addReg(Op1, Op1IsKill * RegState::Kill)
281                   .addImm(Imm));
282    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
283                           TII.get(TargetOpcode::COPY), ResultReg)
284                   .addReg(II.ImplicitDefs[0]));
285  }
286  return ResultReg;
287}
288
289unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
290                                     const TargetRegisterClass *RC,
291                                     uint64_t Imm) {
292  unsigned ResultReg = createResultReg(RC);
293  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
294
295  if (II.getNumDefs() >= 1)
296    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
297                   .addImm(Imm));
298  else {
299    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
300                   .addImm(Imm));
301    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
302                           TII.get(TargetOpcode::COPY), ResultReg)
303                   .addReg(II.ImplicitDefs[0]));
304  }
305  return ResultReg;
306}
307
308unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
309                                                 unsigned Op0, bool Op0IsKill,
310                                                 uint32_t Idx) {
311  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
312  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
313         "Cannot yet extract from physregs");
314  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
315                         DL, TII.get(TargetOpcode::COPY), ResultReg)
316                 .addReg(Op0, getKillRegState(Op0IsKill), Idx));
317  return ResultReg;
318}
319
320bool ARMFastISel::isTypeLegal(const Type *Ty, EVT &VT) {
321  VT = TLI.getValueType(Ty, true);
322
323  // Only handle simple types.
324  if (VT == MVT::Other || !VT.isSimple()) return false;
325
326  // Handle all legal types, i.e. a register that will directly hold this
327  // value.
328  return TLI.isTypeLegal(VT);
329}
330
331bool ARMFastISel::isLoadTypeLegal(const Type *Ty, EVT &VT) {
332  if (isTypeLegal(Ty, VT)) return true;
333
334  // If this is a type than can be sign or zero-extended to a basic operation
335  // go ahead and accept it now.
336  if (VT == MVT::i8 || VT == MVT::i16)
337    return true;
338
339  return false;
340}
341
// Computes the Reg+Offset to get to an object.
// Returns false if the address can't be expressed this way (other basic
// block, special address space, alloca, global). On success Reg holds the
// base register; any pre-existing Offset is folded into Reg.
bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Reg,
                                      int &Offset) {
  // Some boilerplate from the X86 FastISel.
  // NOTE(review): U is assigned but not used below; presumably kept for
  // parity with the X86 version until GEPs etc. are handled here.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
      return false;

    Opcode = I->getOpcode();
    U = I;
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  // Only allocas are special-cased so far; they're rejected because the
  // callers handle them via the frame-index fast paths before getting here.
  switch (Opcode) {
    default:
    //errs() << "Failing Opcode is: " << *Op1 << "\n";
    break;
    case Instruction::Alloca: {
      assert(false && "Alloca should have been handled earlier!");
      return false;
    }
  }

  // Globals aren't materializable yet.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    //errs() << "Failing GV is: " << GV << "\n";
    (void)GV;
    return false;
  }

  // Try to get this in a register if nothing else has worked.
  Reg = getRegForValue(Obj);
  if (Reg == 0) return false;

  // Since the offset may be too large for the load instruction
  // get the reg+offset into a register.
  // TODO: Verify the additions work, otherwise we'll need to add the
  // offset instead of 0 to the instructions and do all sorts of operand
  // munging.
  // TODO: Optimize this somewhat.
  // NOTE(review): nothing above writes Offset, so with the current callers
  // (which initialize it to 0) this block is dead; it exists for when
  // address folding starts producing nonzero offsets.
  if (Offset != 0) {
    ARMCC::CondCodes Pred = ARMCC::AL;
    unsigned PredReg = 0;

    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              Reg, Reg, Offset, Pred, PredReg,
                              static_cast<const ARMBaseInstrInfo&>(TII));
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             Reg, Reg, Offset, Pred, PredReg,
                             static_cast<const ARMBaseInstrInfo&>(TII));
    }
  }

  return true;
}
412
413bool ARMFastISel::ARMLoadAlloca(const Instruction *I) {
414  Value *Op0 = I->getOperand(0);
415
416  // Verify it's an alloca.
417  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op0)) {
418    DenseMap<const AllocaInst*, int>::iterator SI =
419      FuncInfo.StaticAllocaMap.find(AI);
420
421    if (SI != FuncInfo.StaticAllocaMap.end()) {
422      TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
423      unsigned ResultReg = createResultReg(RC);
424      TII.loadRegFromStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
425                               ResultReg, SI->second, RC,
426                               TM.getRegisterInfo());
427      UpdateValueMap(I, ResultReg);
428      return true;
429    }
430  }
431  return false;
432}
433
434bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
435                              unsigned Reg, int Offset) {
436
437  assert(VT.isSimple() && "Non-simple types are invalid here!");
438
439  bool isThumb = AFI->isThumbFunction();
440  unsigned Opc;
441
442  switch (VT.getSimpleVT().SimpleTy) {
443    default:
444      assert(false && "Trying to emit for an unhandled type!");
445      return false;
446    case MVT::i16:
447      Opc = isThumb ? ARM::tLDRH : ARM::LDRH;
448      VT = MVT::i32;
449      break;
450    case MVT::i8:
451      Opc = isThumb ? ARM::tLDRB : ARM::LDRB;
452      VT = MVT::i32;
453      break;
454    case MVT::i32:
455      Opc = isThumb ? ARM::tLDR : ARM::LDR;
456      break;
457  }
458
459  ResultReg = createResultReg(TLI.getRegClassFor(VT));
460
461  // TODO: Fix the Addressing modes so that these can share some code.
462  // Since this is a Thumb1 load this will work in Thumb1 or 2 mode.
463  if (isThumb)
464    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
465                            TII.get(Opc), ResultReg)
466                    .addReg(Reg).addImm(Offset).addReg(0));
467  else
468    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
469                            TII.get(Opc), ResultReg)
470                    .addReg(Reg).addReg(0).addImm(Offset));
471
472  return true;
473}
474
// ARMMaterializeConstant - Materialize the integer constant CI into a fresh
// virtual register, returned through Reg. Returns false for value types we
// can't handle yet.
bool ARMFastISel::ARMMaterializeConstant(const ConstantInt *CI, unsigned &Reg) {
  unsigned Opc;
  bool Signed = true;
  bool isThumb = AFI->isThumbFunction();
  EVT VT = TLI.getValueType(CI->getType(), true);

  // All handled widths use the 32-bit move-immediate pseudo; i1 is treated
  // as an unsigned value (0 or 1) via the deliberate fallthrough.
  switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i1:  Signed = false;     // FALLTHROUGH to handle as i8.
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    Opc = isThumb ? ARM::t2MOVi32imm : ARM::MOVi32imm; break;
  }

  Reg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          Reg)
                  .addImm(Signed ? (uint64_t) CI->getSExtValue() :
                                    CI->getZExtValue()));

  return true;
}
498
499bool ARMFastISel::ARMStoreAlloca(const Instruction *I, unsigned SrcReg) {
500  Value *Op1 = I->getOperand(1);
501
502  // Verify it's an alloca.
503  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
504    DenseMap<const AllocaInst*, int>::iterator SI =
505      FuncInfo.StaticAllocaMap.find(AI);
506
507    if (SI != FuncInfo.StaticAllocaMap.end()) {
508      TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
509      assert(SrcReg != 0 && "Nothing to store!");
510      TII.storeRegToStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
511                              SrcReg, true /*isKill*/, SI->second, RC,
512                              TM.getRegisterInfo());
513      return true;
514    }
515  }
516  return false;
517}
518
519bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
520                               unsigned DstReg, int Offset) {
521  bool isThumb = AFI->isThumbFunction();
522
523  unsigned StrOpc;
524  switch (VT.getSimpleVT().SimpleTy) {
525    default: return false;
526    case MVT::i1:
527    case MVT::i8: StrOpc = isThumb ? ARM::tSTRB : ARM::STRB; break;
528    case MVT::i16: StrOpc = isThumb ? ARM::tSTRH : ARM::STRH; break;
529    case MVT::i32: StrOpc = isThumb ? ARM::tSTR : ARM::STR; break;
530  }
531
532  if (isThumb)
533    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
534                            TII.get(StrOpc), SrcReg)
535                    .addReg(DstReg).addImm(Offset).addReg(0));
536  else
537    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
538                            TII.get(StrOpc), SrcReg)
539                    .addReg(DstReg).addReg(0).addImm(Offset));
540
541  return true;
542}
543
544bool ARMFastISel::ARMSelectStore(const Instruction *I) {
545  Value *Op0 = I->getOperand(0);
546  unsigned SrcReg = 0;
547
548  // Yay type legalization
549  EVT VT;
550  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
551    return false;
552
553  // First see if we're a constant that we want to store, we'll need to
554  // materialize that into a register.
555  // Handle 'null' like i32/i64 0.
556  if (isa<ConstantPointerNull>(Op0))
557    Op0 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext()));
558
559  // If this is a store of a simple constant, materialize the constant into
560  // a register then emit the store into the location.
561  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op0))
562    if (!ARMMaterializeConstant(CI, SrcReg))
563      return false;
564
565  // If Reg is still 0, try to get the value into a register.
566  if (SrcReg == 0)
567    SrcReg = getRegForValue(Op0);
568  if (SrcReg == 0)
569    return false;
570
571  // If we're an alloca we know we have a frame index and can emit the store
572  // quickly.
573  if (ARMStoreAlloca(I, SrcReg))
574    return true;
575
576  // Our register and offset with innocuous defaults.
577  unsigned Reg = 0;
578  int Offset = 0;
579
580  // See if we can handle this as Reg + Offset
581  if (!ARMComputeRegOffset(I->getOperand(1), Reg, Offset))
582    return false;
583
584  if (!ARMEmitStore(VT, SrcReg, Reg, Offset /* 0 */)) return false;
585
586  return false;
587
588}
589
590bool ARMFastISel::ARMSelectLoad(const Instruction *I) {
591  // If we're an alloca we know we have a frame index and can emit the load
592  // directly in short order.
593  if (ARMLoadAlloca(I))
594    return true;
595
596  // Verify we have a legal type before going any further.
597  EVT VT;
598  if (!isLoadTypeLegal(I->getType(), VT))
599    return false;
600
601  // Our register and offset with innocuous defaults.
602  unsigned Reg = 0;
603  int Offset = 0;
604
605  // See if we can handle this as Reg + Offset
606  if (!ARMComputeRegOffset(I->getOperand(0), Reg, Offset))
607    return false;
608
609  unsigned ResultReg;
610  if (!ARMEmitLoad(VT, ResultReg, Reg, Offset /* 0 */)) return false;
611
612  UpdateValueMap(I, ResultReg);
613  return true;
614}
615
616bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
617  // No Thumb-1 for now.
618  if (AFI->isThumbFunction() && !AFI->isThumb2Function()) return false;
619
620  switch (I->getOpcode()) {
621    case Instruction::Load:
622      return ARMSelectLoad(I);
623    case Instruction::Store:
624      return ARMSelectStore(I);
625    default: break;
626  }
627  return false;
628}
629
630namespace llvm {
631  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
632    if (EnableARMFastISel) return new ARMFastISel(funcInfo);
633    return 0;
634  }
635}
636