ARMFastISel.cpp revision 8300712c1e73dc106242f0007e0e0e4dd9ea38ce
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
EnableARMFastISel("arm-fast-isel",
                  cl::desc("Turn on experimental ARM fast-isel support"),
                  cl::init(false), cl::Hidden);

namespace {

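/// ARMFastISel - ARM-specific FastISel subclass. It re-implements the
/// generic instruction-emission helpers so that every MachineInstr it
/// creates carries the predicate and optional CC operands that ARM
/// instructions expect.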
class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
    }

    // Code from FastISel.cpp.
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
    virtual bool ARMSelectLoad(const Instruction *I);

    // Utility routines.
  private:
    bool ARMComputeRegOffset(const Instruction *I, unsigned &Reg, int &Offset);

    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
};

} // end anonymous namespace

// #include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets *CPSR to true if we're defining CPSR rather
// than CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

// If the machine instruction is predicable, add the default predicate
// operands; if it needs default CC operands, add those as well.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate?
  if (TII.isPredicable(MI))
    AddDefaultPred(MIB);

  // Do we optionally define a predicate register? CPSR is true iff the
  // optional def is CPSR; all other optional defs in ARM are the CCR
  // register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

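// The FastEmitInst_* routines below follow the generic implementations in
// FastISel.cpp; the only ARM-specific twist is that each emitted
// instruction is passed through AddOptionalDefs so it receives a default
// predicate and, when the opcode has an optional def, a CC operand.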
unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                         DL, TII.get(TargetOpcode::COPY), ResultReg)
                 .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

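// Try to express the address computed by I's pointer operand as a base
// register plus an immediate offset. Only addresses rooted at static
// allocas (frame indices) are recognized so far; everything else fails
// and the instruction is left to the default selector.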
bool ARMFastISel::ARMComputeRegOffset(const Instruction *I, unsigned &Reg,
                                      int &Offset) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  Value *Op1 = I->getOperand(0);
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Op1)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
      return false;

    Opcode = I->getOpcode();
    U = I;
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Op1)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Op1->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
      //errs() << "Failing Opcode is: " << *Op1 << "\n";
      break;
    case Instruction::Alloca: {
      // Do static allocas.
      const AllocaInst *A = cast<AllocaInst>(Op1);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(A);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Offset =
          TM.getRegisterInfo()->getFrameIndexReference(*FuncInfo.MF,
                                                       SI->second, Reg);
      else
        return false;
      return true;
    }
  }
  return false;
}

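// Select a load whose address ARMComputeRegOffset can express as
// register + immediate, and materialize the result into a fresh GPR
// with an LDR.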
bool ARMFastISel::ARMSelectLoad(const Instruction *I) {
  unsigned Reg;
  int Offset;

  // See if we can handle the address as a register + immediate offset.
  if (!ARMComputeRegOffset(I, Reg, Offset))
    return false;

  unsigned ResultReg = createResultReg(ARM::GPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::LDR), ResultReg)
                  .addImm(0).addReg(Reg).addImm(Offset));

  return true;
}

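// Dispatch on the IR opcode. Only loads are handled here for now; anything
// else is rejected so the caller can fall back to SelectionDAG.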
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return ARMSelectLoad(I);
    default: break;
  }
  return false;
}

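// Entry point used by the ARM backend to create the fast instruction
// selector. Returns null unless -arm-fast-isel is passed, keeping the
// experimental path off by default.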
namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    if (EnableARMFastISel) return new ARMFastISel(funcInfo);
    return 0;
  }
}