ARMFastISel.cpp revision 7fe55b739c1bc319da9c81bcfd9d3e5d5030721b
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
EnableARMFastISel("arm-fast-isel",
                  cl::desc("Turn on experimental ARM fast-isel support"),
                  cl::init(false), cl::Hidden);
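
// Usage sketch (editor's note, not part of the original file): the cl::opt
// above registers an -arm-fast-isel flag, so a command along the lines of
//   llc -march=arm -O0 -fast-isel -arm-fast-isel test.ll
// is expected to let ARM::createFastISel at the bottom of this file hand back
// an ARMFastISel instead of returning 0 and falling back to SelectionDAG.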

namespace {

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  const ARMFunctionInfo *AFI;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
    }

    // Overridden FastEmitInst_* emitters from FastISel.cpp; the ARM versions
    // also add the default predicate and CC operands via AddOptionalDefs.
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
    virtual bool ARMSelectLoad(const Instruction *I);

    // Utility routines.
  private:
    bool ARMComputeRegOffset(const Instruction *I, unsigned &Reg, int &Offset);

    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
};

} // end anonymous namespace

// #include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places where we'll need to add
// a default CCReg argument. Sets *CPSR to true if the instruction defines
// CPSR rather than CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}
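
// Editor's note (illustrative, assumed context): the usual optional def on
// ARM is the cc_out operand modelling the "S" bit of data-processing
// instructions. If the instruction being built already defines CPSR
// explicitly, the helper below adds the Thumb1-style CPSR default; otherwise
// it adds the plain CCR default (no register).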

// If the machine instruction is predicable, add the default predicate
// operands; if it needs default CC operands, add those as well.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate?
  if (TII.isPredicable(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  CPSR is set iff the instruction
  // defines CPSR rather than CCR; all other optional defs in ARM are the
  // CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}
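
// Editor's sketch (illustrative, assumed operand layout): for a predicable
// instruction such as ARM::MOVr, wrapping the builder as
//   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
//                           TII.get(ARM::MOVr), DestReg).addReg(SrcReg));
// appends the default predicate (ARMCC::AL plus a zero predicate-register
// operand) and, because MOVr carries an optional cc_out def, a zero CCR
// operand as well. DestReg/SrcReg are hypothetical virtual registers.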

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                   TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
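
// The remaining FastEmitInst_* variants below follow the same shape: if the
// instruction description has no explicit def, the result shows up in the
// first implicit def, so a COPY from II.ImplicitDefs[0] into ResultReg is
// emitted instead of defining ResultReg directly.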

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                         DL, TII.get(TargetOpcode::COPY), ResultReg)
                 .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

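// ARMComputeRegOffset - Try to express the address used by I's pointer
// operand as a base register plus an immediate offset. Only pointers that
// come straight from a static alloca are handled at the moment; e.g. for
// (illustrative IR, editor's sketch):
//   %x = alloca i32          ; present in FuncInfo.StaticAllocaMap
//   %v = load i32* %x        ; Reg <- frame base register, Offset <- its slot
// Everything else returns false.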
bool ARMFastISel::ARMComputeRegOffset(const Instruction *I, unsigned &Reg,
                                      int &Offset) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  Value *Op1 = I->getOperand(0);
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Op1)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
      return false;

    Opcode = I->getOpcode();
    U = I;
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Op1)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Op1->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    //errs() << "Failing Opcode is: " << *Op1 << "\n";
    break;
    case Instruction::Alloca: {
      // Do static allocas.
      const AllocaInst *A = cast<AllocaInst>(Op1);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(A);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Offset =
          TM.getRegisterInfo()->getFrameIndexReference(*FuncInfo.MF,
                                                       SI->second, Reg);
      else
        return false;
      return true;
    }
  }
  return false;
}

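// ARMSelectLoad - Select a load by emitting an LDR from the base register and
// offset that ARMComputeRegOffset derives for the pointer operand; anything
// that cannot be expressed that way is left to SelectionDAG.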
bool ARMFastISel::ARMSelectLoad(const Instruction *I) {

  unsigned Reg;
  int Offset;

  // See if we can handle this as Reg + Offset
  if (!ARMComputeRegOffset(I, Reg, Offset))
    return false;

  unsigned ResultReg = createResultReg(ARM::GPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::LDR), ResultReg)
                  .addImm(0).addReg(Reg).addImm(Offset));

  return true;
}

bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  // No Thumb-1 for now.
  if (AFI->isThumbFunction() && !AFI->isThumb2Function()) return false;

  switch (I->getOpcode()) {
    case Instruction::Load:
      return ARMSelectLoad(I);
    default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    if (EnableARMFastISel) return new ARMFastISel(funcInfo);
    return 0;
  }
}
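
// Editor's note: the call site for this factory lives outside this file; the
// usual arrangement (assumed here, not verified against this revision) is for
// the target's TargetLowering to forward its fast-isel hook to it, roughly:
//   FastISel *ARMTargetLowering::createFastISel(FunctionLoweringInfo &fi) const {
//     return ARM::createFastISel(fi);
//   }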