ARMBaseInstrInfo.cpp revision cdc17ebc2b2e9e18ac516b9d246a5c5a3af227d3
//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

ARMBaseInstrInfo::ARMBaseInstrInfo()
  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)) {
}

MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  unsigned TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned NumOps = TID.getNumOperands();
  bool isLoad = !TID.mayStore();
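  // This code expects an indexed load to carry its writeback register in
  // operand 1 (operand 0 for a store), the base register in operand 2, and
  // the offset register, addressing-mode immediate and predicate condition
  // code as the last three operands.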
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3: {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8 bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() &&
          TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  unsigned SecondLastOpc = SecondLastInst->getOpcode();
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them for
  // correctness of Thumb constant islands.
  if (isJumpTableBranchOpcode(SecondLastOpc) &&
      isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}


unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                             const SmallVectorImpl<MachineOperand> &Cond) const {
  // FIXME: this should probably have a DebugLoc argument.
  DebugLoc dl = DebugLoc::getUnknownLoc();

  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc   = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
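  // An unconditional branch has no predicate operands; predicate it by
  // rewriting it to the matching conditional branch and appending the
  // condition code and predicate register operands.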
  if (isUncondBranchOpcode(Opc)) {
    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

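  // Otherwise Pred1 subsumes Pred2 only when every state that satisfies CC2
  // also satisfies CC1 (e.g. HI implies HS).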
  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  // FIXME: This confuses implicit_def with optional CPSR def.
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}


/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) DISABLE_INLINE;
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  return JT[JTI].MBBs.size();
}

/// GetInstSizeInBytes - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned TSFlags = TID.TSFlags;

  unsigned Opc = MI->getOpcode();
  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default: {
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *TAI);
    if (MI->isLabel())
      return 0;
    switch (Opc) {
    default:
      llvm_unreachable("Unknown or unset size field for instr!");
    case TargetInstrInfo::IMPLICIT_DEF:
    case TargetInstrInfo::DECLARE:
    case TargetInstrInfo::DBG_LABEL:
    case TargetInstrInfo::EH_LABEL:
      return 0;
    }
    break;
  }
  case ARMII::Size8Bytes: return 8;          // ARM instruction x 2.
  case ARMII::Size4Bytes: return 4;          // ARM / Thumb2 instruction.
  case ARMII::Size2Bytes: return 2;          // Thumb1 instruction.
  case ARMII::SizeSpecial: {
    switch (Opc) {
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded as
      // operand #2.
      return MI->getOperand(2).getImm();
    case ARM::Int_eh_sjlj_setjmp:
      return 24;
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::tBR_JTr:
    case ARM::t2BR_JT:
    case ARM::t2TBB:
    case ARM::t2TBH: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is the branch instruction (2 or 4 bytes) plus
      // NumEntries * EntrySize: entries are 4 bytes each, except one byte
      // for TBB and two bytes for TBH.
      unsigned EntrySize = (Opc == ARM::t2TBB)
        ? 1 : ((Opc == ARM::t2TBH) ? 2 : 4);
      unsigned NumOps = TID.getNumOperands();
      MachineOperand JTOP =
        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
      unsigned JTI = JTOP.getIndex();
      const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2-byte aligned, but JT entries are 4-byte
      // aligned. The assembler / linker may add 2 bytes of padding just
      // before the JT entries.  The size does not include this padding; the
      // constant islands pass does separate bookkeeping for it.
      // FIXME: If we know the size of the function is less than (1 << 16) * 2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
      unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
      unsigned NumEntries = getNumJTEntries(JT, JTI);
      if (Opc == ARM::t2TBB && (NumEntries & 1))
        // Make sure the instruction that follows TBB is 2-byte aligned.
        // FIXME: Constant island pass should insert an "ALIGN" instruction
        // instead.
        ++NumEntries;
      return NumEntries * EntrySize + InstSize;
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0; // Not reached
}

/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool
ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
                              unsigned &SrcReg, unsigned &DstReg,
                              unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
  SrcSubIdx = DstSubIdx = 0; // No sub-registers.

  switch (MI.getOpcode()) {
  default: break;
  case ARM::FCPYS:
  case ARM::FCPYD:
  case ARM::VMOVD:
  case ARM::VMOVQ: {
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
  case ARM::MOVr:
  case ARM::tMOVr:
  case ARM::tMOVgpr2tgpr:
  case ARM::tMOVtgpr2gpr:
  case ARM::tMOVgpr2gpr:
  case ARM::t2MOVr: {
    assert(MI.getDesc().getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "Invalid ARM MOV instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
  }

  return false;
}

unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDR:
  case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2LDRi12:
  case ARM::tRestore:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FLDD:
  case ARM::FLDS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STR:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2STRi12:
  case ARM::tSpill:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FSTD:
  case ARM::FSTS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

bool
ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               unsigned DestReg, unsigned SrcReg,
                               const TargetRegisterClass *DestRC,
                               const TargetRegisterClass *SrcRC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (DestRC != SrcRC) {
    if (((DestRC == ARM::DPRRegisterClass) &&
         (SrcRC == ARM::DPR_VFP2RegisterClass)) ||
        ((SrcRC == ARM::DPRRegisterClass) &&
         (DestRC == ARM::DPR_VFP2RegisterClass))) {
      // Allow copy between DPR and DPR_VFP2.
    } else {
      return false;
    }
  }

  if (DestRC == ARM::GPRRegisterClass) {
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr),
                                        DestReg).addReg(SrcReg)));
  } else if (DestRC == ARM::SPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYS), DestReg)
                   .addReg(SrcReg));
  } else if ((DestRC == ARM::DPRRegisterClass) ||
             (DestRC == ARM::DPR_VFP2RegisterClass)) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYD), DestReg)
                   .addReg(SrcReg));
  } else if (DestRC == ARM::QPRRegisterClass) {
    BuildMI(MBB, I, DL, get(ARM::VMOVQ), DestReg).addReg(SrcReg);
  } else {
    return false;
  }

  return true;
}

void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::DPRRegisterClass || RC == ARM::DPR_VFP2RegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTD))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0));
  } else if (RC == ARM::SPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTS))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::QPRRegisterClass && "Unknown regclass!");
    // FIXME: Neon instructions should support predicates
    BuildMI(MBB, I, DL, get(ARM::VSTRQ)).addReg(SrcReg, getKillRegState(isKill))
      .addFrameIndex(FI).addImm(0);
  }
}

void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
                   .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::DPRRegisterClass || RC == ARM::DPR_VFP2RegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDD), DestReg)
                   .addFrameIndex(FI).addImm(0));
  } else if (RC == ARM::SPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDS), DestReg)
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::QPRRegisterClass && "Unknown regclass!");
    // FIXME: Neon instructions should support predicates
    BuildMI(MBB, I, DL, get(ARM::VLDRQ), DestReg).addFrameIndex(FI).addImm(0);
  }
}

MachineInstr *ARMBaseInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = NULL;
  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
    // If it is updating CPSR, then it cannot be folded.
    if (MI->getOperand(4).getReg() == ARM::CPSR && !MI->getOperand(4).isDead())
      return NULL;
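    // MOVr / t2MOVr are predicable: operand 2 holds the condition code and
    // operand 3 the predicate register; carry both over to the folded
    // load / store.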
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      if (Opc == ARM::MOVr)
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
          .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      else // ARM::t2MOVr
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
          .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      if (Opc == ARM::MOVr)
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef))
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      else // ARM::t2MOVr
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef))
          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == ARM::tMOVgpr2gpr ||
             Opc == ARM::tMOVtgpr2gpr ||
             Opc == ARM::tMOVgpr2tgpr) {
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
    }
  } else if (Opc == ARM::FCPYS) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTS))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
        .addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDS))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == ARM::FCPYD) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTD))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDD))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  }

  return NewMI;
}

MachineInstr*
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr* MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr* LoadMI) const {
  // FIXME
  return 0;
}

bool
ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                   const SmallVectorImpl<unsigned> &Ops) const {
  if (Ops.size() != 1) return false;

  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
    // If it is updating CPSR, then it cannot be folded.
    return MI->getOperand(4).getReg() != ARM::CPSR ||
      MI->getOperand(4).isDead();
  } else if (Opc == ARM::tMOVgpr2gpr ||
             Opc == ARM::tMOVtgpr2gpr ||
             Opc == ARM::tMOVgpr2tgpr) {
    return true;
  } else if (Opc == ARM::FCPYS || Opc == ARM::FCPYD) {
    return true;
  } else if (Opc == ARM::VMOVD || Opc == ARM::VMOVQ) {
    return false; // FIXME
  }

  return false;
}

/// getInstrPredicate - If the instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
ARMCC::CondCodes llvm::getInstrPredicate(MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }

  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
}


int llvm::getMatchingCondBranchOpcode(int Opc) {
  if (Opc == ARM::B)
    return ARM::Bcc;
  else if (Opc == ARM::tB)
    return ARM::tBcc;
  else if (Opc == ARM::t2B)
    return ARM::t2Bcc;

  llvm_unreachable("Unknown unconditional branch opcode!");
  return 0;
}


void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                               unsigned DestReg, unsigned BaseReg, int NumBytes,
                               ARMCC::CondCodes Pred, unsigned PredReg,
                               const ARMBaseInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

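  // Peel off a chunk of NumBytes that fits a rotated 8-bit immediate (so_imm)
  // and emit one ADD / SUB for it; repeat until the whole offset has been
  // materialized.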
  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We handle these bits now, so clear them from the offset.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
    BaseReg = DestReg;
  }
}

int llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                               unsigned FrameReg, int Offset,
                               const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const TargetInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      return 0;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      return 0;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We handle these bits now, so clear them from the offset.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode4:
      break;
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
      break;
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold the address computation if the opcode has offset bits.
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
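        // In the ARM_AM immediate encoding the add/sub flag sits just above
        // the offset field, so a subtraction is marked by setting bit NumBits.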
        if (isSub)
          ImmedOffset |= 1 << NumBits;
        ImmOp.ChangeToImmediate(ImmedOffset);
        return 0;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub)
        ImmedOffset |= 1 << NumBits;
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  return (isSub) ? -Offset : Offset;
}
1012