ARMFastISel.cpp revision 9f782d4dcf580ae508cc83f412884cd3c5f9207d
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
EnableARMFastISel("arm-fast-isel",
                  cl::desc("Turn on experimental ARM fast-isel support"),
                  cl::init(false), cl::Hidden);

namespace {

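// ARMFastISel - ARM-specific FastISel implementation. For now it only selects
// simple loads; everything else falls back to SelectionDAG.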
class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  const ARMFunctionInfo *AFI;

  // FIXME: Remove this and replace it with queries.
  const TargetRegisterClass *FixedRC;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      FixedRC = ARM::GPRRegisterClass;
    }

    // ARM-specific versions of the FastEmitInst_* routines copied from
    // FastISel.cpp.
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
    virtual bool ARMSelectLoad(const Instruction *I);

    // Utility routines.
  private:
    bool ARMLoadAlloca(const Instruction *I);
    bool ARMComputeRegOffset(const Value *Obj, unsigned &Reg, int &Offset);

    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
};

} // end anonymous namespace

// #include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

// If the machine instruction is predicable, add the predicate operands; if it
// needs default CC operands, add those.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate?
  if (TII.isPredicable(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  CPSR is set iff the optional operand
  // defines CPSR; all other optional defs in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

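// The FastEmitInst_* routines emit a single instruction of the given opcode,
// adding any default predicate/CC operands via AddOptionalDefs, and return
// the virtual register holding the result.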
unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

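// If the instruction has an explicit def, emit the result straight into
// ResultReg; otherwise copy the instruction's first implicit def into
// ResultReg. The same pattern is used by the other operand variants below.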
unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                   TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

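// Extract a subregister by copying the given subreg index of Op0 into a fresh
// register of the class for RetVT.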
unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                         DL, TII.get(TargetOpcode::COPY), ResultReg)
                 .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// Computes the Reg+Offset to get to an object.
bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Reg,
                                      int &Offset) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
      return false;

    Opcode = I->getOpcode();
    U = I;
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    //errs() << "Failing Opcode is: " << *Op1 << "\n";
    break;
    case Instruction::Alloca: {
      assert(false && "Alloca should have been handled earlier!");
      return false;
    }
  }

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    //errs() << "Failing GV is: " << GV << "\n";
    (void)GV;
    return false;
  }

  // Try to get this in a register if nothing else has worked.
  Reg = getRegForValue(Obj);
  return Reg != 0;
}

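// ARMLoadAlloca - If the load's pointer operand is a static alloca, load
// straight from its frame index; otherwise return false.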
bool ARMFastISel::ARMLoadAlloca(const Instruction *I) {
  Value *Op0 = I->getOperand(0);

  // Verify it's an alloca.
  const Instruction *Inst = dyn_cast<Instruction>(Op0);
  if (!Inst || Inst->getOpcode() != Instruction::Alloca) return false;

  const AllocaInst *AI = cast<AllocaInst>(Op0);
  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned ResultReg = createResultReg(FixedRC);
    TII.loadRegFromStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
                              ResultReg, SI->second, FixedRC,
                              TM.getRegisterInfo());
    UpdateValueMap(I, ResultReg);
    return true;
  }

  return false;
}

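// ARMSelectLoad - Select a load either directly from a static alloca's frame
// index or by computing a base register + offset for the address.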
bool ARMFastISel::ARMSelectLoad(const Instruction *I) {
  // Our register and offset with innocuous defaults.
  unsigned Reg = 0;
  int Offset = 0;

  // If the pointer is an alloca, we know we have a frame index and can emit
  // the load directly in short order.
  if (ARMLoadAlloca(I))
    return true;

  // See if we can handle this address as Reg + Offset.
  if (!ARMComputeRegOffset(I->getOperand(0), Reg, Offset))
    return false;

  // Since the offset may be too large for the load instruction, get the
  // reg+offset into a single register.
  // TODO: Optimize this somewhat.
  ARMCC::CondCodes Pred = ARMCC::AL;
  unsigned PredReg = 0;

  if (!AFI->isThumbFunction())
    emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            Reg, Reg, Offset, Pred, PredReg,
                            static_cast<const ARMBaseInstrInfo&>(TII));
  else {
    assert(AFI->isThumb2Function());
    emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           Reg, Reg, Offset, Pred, PredReg,
                           static_cast<const ARMBaseInstrInfo&>(TII));
  }

  // FIXME: There is more than one register class in the world...
  // TODO: Verify the additions above work, otherwise we'll need to add the
  // offset instead of 0 and do all sorts of operand munging.
  unsigned ResultReg = createResultReg(FixedRC);
  // TODO: Fix the Addressing modes so that these can share some code.
  if (AFI->isThumb2Function())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::tLDR), ResultReg)
                    .addReg(Reg).addImm(0).addReg(0));
  else
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDR), ResultReg)
                    .addReg(Reg).addReg(0).addImm(0));
  UpdateValueMap(I, ResultReg);

  return true;
}

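// TargetSelectInstruction - Dispatch on the IR opcode; currently only loads
// are handled, and Thumb-1 functions are rejected entirely.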
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  // No Thumb-1 for now.
  if (AFI->isThumbFunction() && !AFI->isThumb2Function()) return false;

  switch (I->getOpcode()) {
    case Instruction::Load:
      return ARMSelectLoad(I);
    default: break;
  }
  return false;
}

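// createFastISel - Entry point used by the ARM backend to create a FastISel
// instance; returns one only when the experimental -arm-fast-isel flag is set.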
namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    if (EnableARMFastISel) return new ARMFastISel(funcInfo);
    return 0;
  }
}