ARMBaseInstrInfo.cpp revision 25f7cfc3cccba6f569f29f79ea533bae960b93c0
//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget &sti)
  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)) {
}

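// convertToThreeAddress - When the -enable-arm-3-addr-conv option is set,
// split a pre- or post-indexed load/store into an un-indexed memory access
// plus an explicit ADD/SUB that performs the base register update.  The
// intent (a sketch of the expected codegen, not a guaranteed output) is:
//   ldr r0, [r1, #4]!   ->   add r1, r1, #4
//                            ldr r0, [r1]
// Returns NULL when the instruction cannot be profitably split.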
MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  unsigned TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned NumOps = TID.getNumOperands();
  bool isLoad = !TID.mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3: {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // The immediate is 8 bits, so it's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() &&
          TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
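// AnalyzeBranch examines the terminators of MBB.  It returns false and fills
// in TBB, FBB and Cond when the block ends in a form it understands
// (fall-through, unconditional branch, conditional branch, or a conditional
// branch followed by an unconditional one); it returns true when the branch
// sequence cannot be analyzed, e.g. for indirect or jump-table branches.
// When AllowModify is set, dead trailing unconditional branches may be erased.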
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  unsigned SecondLastOpc = SecondLastInst->getOpcode();
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them for
  // correctness of Thumb constant islands.
  if (isJumpTableBranchOpcode(SecondLastOpc) &&
      isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}


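// RemoveBranch erases the branch instructions at the end of MBB: first the
// trailing unconditional or conditional branch, then a conditional branch
// preceding it, if present.  It returns the number of instructions removed
// (0, 1 or 2).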
unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

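// InsertBranch emits the branch sequence described by TBB, FBB and Cond,
// picking the ARM, Thumb1 or Thumb2 branch opcodes based on the current
// function, and returns the number of instructions inserted (1 for a single
// branch, 2 for a conditional/unconditional pair).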
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                             const SmallVectorImpl<MachineOperand> &Cond) const {
  // FIXME: this should probably have a DebugLoc argument.
  DebugLoc dl = DebugLoc::getUnknownLoc();

  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc   = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

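// PredicateInstruction converts an unconditional branch into its conditional
// form, or rewrites the predicate operands of an already predicable
// instruction, using the condition code and flag register given in Pred.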
bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

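// SubsumesPredicate returns true if the first predicate implies the second,
// e.g. AL subsumes every condition and HS subsumes HI.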
bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}


/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) DISABLE_INLINE;
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned TSFlags = TID.TSFlags;

  unsigned Opc = MI->getOpcode();
  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default: {
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
    if (MI->isLabel())
      return 0;
    switch (Opc) {
    default:
      llvm_unreachable("Unknown or unset size field for instr!");
    case TargetInstrInfo::IMPLICIT_DEF:
    case TargetInstrInfo::DECLARE:
    case TargetInstrInfo::DBG_LABEL:
    case TargetInstrInfo::EH_LABEL:
      return 0;
    }
    break;
  }
  case ARMII::Size8Bytes: return 8;          // ARM instruction x 2.
  case ARMII::Size4Bytes: return 4;          // ARM / Thumb2 instruction.
  case ARMII::Size2Bytes: return 2;          // Thumb1 instruction.
  case ARMII::SizeSpecial: {
    switch (Opc) {
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded as
      // operand #2.
      return MI->getOperand(2).getImm();
    case ARM::Int_eh_sjlj_setjmp:
      return 12;
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::tBR_JTr:
    case ARM::t2BR_JT:
    case ARM::t2TBB:
    case ARM::t2TBH: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is the branch instruction plus the table: 4 bytes
      // per entry, except TBB (1 byte per entry) and TBH (2 bytes per entry).
      unsigned EntrySize = (Opc == ARM::t2TBB)
        ? 1 : ((Opc == ARM::t2TBH) ? 2 : 4);
      unsigned NumOps = TID.getNumOperands();
      MachineOperand JTOP =
        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
      unsigned JTI = JTOP.getIndex();
      const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2-byte aligned, but JT entries are 4-byte
      // aligned. The assembler / linker may add 2-byte padding just before
      // the JT entries.  The size does not include this padding; the
      // constant islands pass does separate bookkeeping for it.
      // FIXME: If we know the size of the function is less than (1 << 16) * 2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
      unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
      unsigned NumEntries = getNumJTEntries(JT, JTI);
      if (Opc == ARM::t2TBB && (NumEntries & 1))
        // Make sure the instruction that follows TBB is 2-byte aligned.
        // FIXME: Constant island pass should insert an "ALIGN" instruction
        // instead.
        ++NumEntries;
      return NumEntries * EntrySize + InstSize;
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0; // Not reached
}

/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool
ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
                              unsigned &SrcReg, unsigned &DstReg,
                              unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
  SrcSubIdx = DstSubIdx = 0; // No sub-registers.

  switch (MI.getOpcode()) {
  default: break;
  case ARM::FCPYS:
  case ARM::FCPYD:
  case ARM::VMOVD:
  case ARM::VMOVQ: {
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
  case ARM::MOVr:
  case ARM::tMOVr:
  case ARM::tMOVgpr2tgpr:
  case ARM::tMOVtgpr2gpr:
  case ARM::tMOVgpr2gpr:
  case ARM::t2MOVr: {
    assert(MI.getDesc().getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "Invalid ARM MOV instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
  }

  return false;
}

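// isLoadFromStackSlot returns the destination register and sets FrameIndex
// when MI is a plain load from a stack slot with no offset applied;
// otherwise it returns 0.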
unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDR:
  case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2LDRi12:
  case ARM::tRestore:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FLDD:
  case ARM::FLDS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

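// isStoreToStackSlot is the store-side counterpart of isLoadFromStackSlot:
// it returns the source register and sets FrameIndex for a plain store to a
// stack slot, and 0 otherwise.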
unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STR:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2STRi12:
  case ARM::tSpill:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FSTD:
  case ARM::FSTS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

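// copyRegToReg emits a register-to-register copy chosen by register class
// (MOVr for GPR, FCPYS for SPR, FCPYD for DPR, VMOVQ for QPR).  Copies
// between different register classes are not supported and return false.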
bool
ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               unsigned DestReg, unsigned SrcReg,
                               const TargetRegisterClass *DestRC,
                               const TargetRegisterClass *SrcRC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (DestRC != SrcRC) {
    // Not yet supported!
    return false;
  }

  if (DestRC == ARM::GPRRegisterClass)
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr),
                                        DestReg).addReg(SrcReg)));
  else if (DestRC == ARM::SPRRegisterClass)
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYS), DestReg)
                   .addReg(SrcReg));
  else if (DestRC == ARM::DPRRegisterClass)
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYD), DestReg)
                   .addReg(SrcReg));
  else if (DestRC == ARM::QPRRegisterClass)
    BuildMI(MBB, I, DL, get(ARM::VMOVQ), DestReg).addReg(SrcReg);
  else
    return false;

  return true;
}

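// storeRegToStackSlot and loadRegFromStackSlot below spill and reload GPR,
// DPR and SPR registers with STR/LDR, FSTD/FLDD and FSTS/FLDS respectively,
// always using a zero offset from the frame index.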
void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::DPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTD))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTS))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0));
  }
}

void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
                   .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::DPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDD), DestReg)
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDS), DestReg)
                   .addFrameIndex(FI).addImm(0));
  }
}

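// foldMemoryOperandImpl folds a register copy (MOVr, t2MOVr, FCPYS or FCPYD)
// into a load from or a store to the given frame index, depending on whether
// the folded operand is the destination or the source of the copy.  Moves
// with a live CPSR update are not folded.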
MachineInstr *ARMBaseInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = NULL;
  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
    // If it is updating CPSR, then it cannot be folded.
    if (MI->getOperand(4).getReg() != ARM::CPSR || MI->getOperand(4).isDead()) {
      unsigned Pred = MI->getOperand(2).getImm();
      unsigned PredReg = MI->getOperand(3).getReg();
      if (OpNum == 0) { // move -> store
        unsigned SrcReg = MI->getOperand(1).getReg();
        bool isKill = MI->getOperand(1).isKill();
        bool isUndef = MI->getOperand(1).isUndef();
        if (Opc == ARM::MOVr)
          NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
            .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
            .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
        else // ARM::t2MOVr
          NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
            .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
            .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
      } else {          // move -> load
        unsigned DstReg = MI->getOperand(0).getReg();
        bool isDead = MI->getOperand(0).isDead();
        bool isUndef = MI->getOperand(0).isUndef();
        if (Opc == ARM::MOVr)
          NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
            .addReg(DstReg,
                    RegState::Define |
                    getDeadRegState(isDead) |
                    getUndefRegState(isUndef))
            .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
        else // ARM::t2MOVr
          NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
            .addReg(DstReg,
                    RegState::Define |
                    getDeadRegState(isDead) |
                    getUndefRegState(isUndef))
            .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
      }
    }
  } else if (Opc == ARM::FCPYS) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTS))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
        .addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDS))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == ARM::FCPYD) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTD))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDD))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  }

  return NewMI;
}

MachineInstr*
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr* MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr* LoadMI) const {
  // FIXME
  return 0;
}

bool
ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops) const {
  if (Ops.size() != 1) return false;

  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
    // If it is updating CPSR, then it cannot be folded.
    return MI->getOperand(4).getReg() != ARM::CPSR || MI->getOperand(4).isDead();
  } else if (Opc == ARM::FCPYS || Opc == ARM::FCPYD) {
    return true;
  } else if (Opc == ARM::VMOVD || Opc == ARM::VMOVQ) {
    return false; // FIXME
  }

  return false;
}

int llvm::getMatchingCondBranchOpcode(int Opc) {
  if (Opc == ARM::B)
    return ARM::Bcc;
  else if (Opc == ARM::tB)
    return ARM::tBcc;
  else if (Opc == ARM::t2B)
    return ARM::t2Bcc;

  llvm_unreachable("Unknown unconditional branch opcode!");
  return 0;
}


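// emitARMRegPlusImmediate materializes DestReg = BaseReg +/- NumBytes by
// emitting a chain of ADDri/SUBri instructions, splitting the constant into
// chunks that are encodable as rotated 8-bit (so_imm) immediates.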
void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                               unsigned DestReg, unsigned BaseReg, int NumBytes,
                               ARMCC::CondCodes Pred, unsigned PredReg,
                               const ARMBaseInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
    BaseReg = DestReg;
  }
}

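// rewriteARMFrameIndex replaces the frame index operand of MI with FrameReg
// and folds as much of Offset into the instruction's immediate field as the
// addressing mode allows.  The part of the offset that could not be encoded
// is returned (negated for subtraction) so the caller can handle it.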
int llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                               unsigned FrameReg, int Offset,
                               const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const TargetInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      return 0;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      return 0;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
      break;
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold the address computation if the opcode has offset bits.
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        if (isSub)
          ImmedOffset |= 1 << NumBits;
        ImmOp.ChangeToImmediate(ImmedOffset);
        return 0;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub)
        ImmedOffset |= 1 << NumBits;
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  return (isSub) ? -Offset : Offset;
}