ARMBaseInstrInfo.cpp revision 69b9f9883e10efa266d59a5dd2f4d99de92c6707
//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMConstantPoolValue.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
    Subtarget(STI) {
}

MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  unsigned TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
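  // Illustrative example (not from the original source): a pre-indexed
  //   LDR r0, [r1, #4]!
  // becomes
  //   ADD r1, r1, #4
  //   LDR r0, [r1]
  // while a post-indexed form keeps the memory access on the old base
  // register and emits the ADD/SUB afterwards.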
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned NumOps = TID.getNumOperands();
  bool isLoad = !TID.mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() &&
          TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
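// Note (summary of the TargetInstrInfo convention this implements): a return
// value of false means the terminators were understood; TBB is then the taken
// destination, FBB the false/fall-through destination (null if the block just
// falls through), and Cond holds the ARM condition code and CPSR operands for
// a conditional branch (empty for an unconditional one). Returning true means
// the branch sequence could not be analyzed.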
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  unsigned SecondLastOpc = SecondLastInst->getOpcode();
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB =  SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them for
  // correctness of Thumb constant islands.
  if ((isJumpTableBranchOpcode(SecondLastOpc) ||
       isIndirectBranchOpcode(SecondLastOpc)) &&
      isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}


unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                             const SmallVectorImpl<MachineOperand> &Cond) const {
  // FIXME this should probably have a DebugLoc argument
  DebugLoc dl;

  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc   = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

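  // CC1 subsumes CC2 when any state that satisfies CC2 also satisfies CC1,
  // e.g. HS (unsigned >=) holds whenever HI (unsigned >) holds, and GE holds
  // whenever GT holds.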
  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  // FIXME: This confuses implicit_def with optional CPSR def.
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isPredicable())
    return false;

  if ((TID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
    ARMFunctionInfo *AFI =
      MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
    return AFI->isThumb2Function();
  }
  return true;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
DISABLE_INLINE
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI);
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  assert(JTI < JT.size());
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned TSFlags = TID.TSFlags;

  unsigned Opc = MI->getOpcode();
  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default: {
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
    if (MI->isLabel())
      return 0;
    switch (Opc) {
    default:
      llvm_unreachable("Unknown or unset size field for instr!");
    case TargetOpcode::IMPLICIT_DEF:
    case TargetOpcode::KILL:
    case TargetOpcode::DBG_LABEL:
    case TargetOpcode::EH_LABEL:
    case TargetOpcode::DBG_VALUE:
      return 0;
    }
    break;
  }
  case ARMII::Size8Bytes: return 8;          // ARM instruction x 2.
  case ARMII::Size4Bytes: return 4;          // ARM / Thumb2 instruction.
  case ARMII::Size2Bytes: return 2;          // Thumb1 instruction.
  case ARMII::SizeSpecial: {
    switch (Opc) {
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded as
      // operand #2.
      return MI->getOperand(2).getImm();
    case ARM::Int_eh_sjlj_setjmp:
    case ARM::Int_eh_sjlj_setjmp_nofp:
      return 24;
    case ARM::tInt_eh_sjlj_setjmp:
    case ARM::t2Int_eh_sjlj_setjmp:
    case ARM::t2Int_eh_sjlj_setjmp_nofp:
      return 14;
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::tBR_JTr:
    case ARM::t2BR_JT:
    case ARM::t2TBB:
    case ARM::t2TBH: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is the branch instruction plus the table: four
      // bytes per entry for the ARM/Thumb2 BR_JT forms, one byte per entry
      // for TBB, and two bytes per entry for TBH.
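      // Worked example (illustrative, not from the original source): a t2TBB
      // table with 7 one-byte entries is padded to 8 entries below so the
      // following instruction stays 2-byte aligned, giving 8 * 1 + 4 = 12
      // bytes; a t2BR_JT with 7 four-byte entries is 7 * 4 + 2 = 30 bytes.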
      unsigned EntrySize = (Opc == ARM::t2TBB)
        ? 1 : ((Opc == ARM::t2TBH) ? 2 : 4);
      unsigned NumOps = TID.getNumOperands();
      MachineOperand JTOP =
        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
      unsigned JTI = JTOP.getIndex();
      const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      assert(MJTI != 0);
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2-byte aligned, but JT entries are 4-byte
      // aligned. The assembler / linker may add 2 bytes of padding just before
      // the JT entries.  The size does not include this padding; the
      // constant islands pass does separate bookkeeping for it.
      // FIXME: If we know the size of the function is less than (1 << 16) *2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
      unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
      unsigned NumEntries = getNumJTEntries(JT, JTI);
      if (Opc == ARM::t2TBB && (NumEntries & 1))
        // Make sure the instruction that follows TBB is 2-byte aligned.
        // FIXME: Constant island pass should insert an "ALIGN" instruction
        // instead.
        ++NumEntries;
      return NumEntries * EntrySize + InstSize;
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0; // Not reached
}

/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool
ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
                              unsigned &SrcReg, unsigned &DstReg,
                              unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
  switch (MI.getOpcode()) {
  default: break;
  case ARM::VMOVS:
  case ARM::VMOVD:
  case ARM::VMOVDneon:
  case ARM::VMOVQ:
  case ARM::VMOVQQ : {
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SrcSubIdx = MI.getOperand(1).getSubReg();
    DstSubIdx = MI.getOperand(0).getSubReg();
    return true;
  }
  case ARM::MOVr:
  case ARM::tMOVr:
  case ARM::tMOVgpr2tgpr:
  case ARM::tMOVtgpr2gpr:
  case ARM::tMOVgpr2gpr:
  case ARM::t2MOVr: {
    assert(MI.getDesc().getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "Invalid ARM MOV instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SrcSubIdx = MI.getOperand(1).getSubReg();
    DstSubIdx = MI.getOperand(0).getSubReg();
    return true;
  }
  }

  return false;
}

unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDR:
  case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2LDRi12:
  case ARM::tRestore:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLDRD:
  case ARM::VLDRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STR:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2STRi12:
  case ARM::tSpill:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VSTRD:
  case ARM::VSTRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

bool
ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               unsigned DestReg, unsigned SrcReg,
                               const TargetRegisterClass *DestRC,
                               const TargetRegisterClass *SrcRC,
                               DebugLoc DL) const {
  // tGPR is used sometimes in ARM instructions that need to avoid using
  // certain registers.  Just treat it as GPR here.
  if (DestRC == ARM::tGPRRegisterClass)
    DestRC = ARM::GPRRegisterClass;
  if (SrcRC == ARM::tGPRRegisterClass)
    SrcRC = ARM::GPRRegisterClass;

  // Allow DPR / DPR_VFP2 / DPR_8 cross-class copies.
  if (DestRC == ARM::DPR_8RegisterClass)
    DestRC = ARM::DPR_VFP2RegisterClass;
  if (SrcRC == ARM::DPR_8RegisterClass)
    SrcRC = ARM::DPR_VFP2RegisterClass;

  // Allow QPR / QPR_VFP2 / QPR_8 cross-class copies.
  if (DestRC == ARM::QPR_VFP2RegisterClass ||
      DestRC == ARM::QPR_8RegisterClass)
    DestRC = ARM::QPRRegisterClass;
  if (SrcRC == ARM::QPR_VFP2RegisterClass ||
      SrcRC == ARM::QPR_8RegisterClass)
    SrcRC = ARM::QPRRegisterClass;

  // Allow QQPR / QQPR_VFP2 / QQPR_8 cross-class copies.
  if (DestRC == ARM::QQPR_VFP2RegisterClass ||
      DestRC == ARM::QQPR_8RegisterClass)
    DestRC = ARM::QQPRRegisterClass;
  if (SrcRC == ARM::QQPR_VFP2RegisterClass ||
      SrcRC == ARM::QQPR_8RegisterClass)
    SrcRC = ARM::QQPRRegisterClass;

  // Disallow copies of unequal sizes.
  if (DestRC != SrcRC && DestRC->getSize() != SrcRC->getSize())
    return false;

  if (DestRC == ARM::GPRRegisterClass) {
    if (SrcRC == ARM::SPRRegisterClass)
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVRS), DestReg)
                     .addReg(SrcReg));
    else
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr),
                                          DestReg).addReg(SrcReg)));
  } else {
    unsigned Opc;

    if (DestRC == ARM::SPRRegisterClass)
      Opc = (SrcRC == ARM::GPRRegisterClass ? ARM::VMOVSR : ARM::VMOVS);
    else if (DestRC == ARM::DPRRegisterClass)
      Opc = ARM::VMOVD;
    else if (DestRC == ARM::DPR_VFP2RegisterClass ||
             SrcRC == ARM::DPR_VFP2RegisterClass)
      // Always use neon reg-reg move if source or dest is NEON-only regclass.
      Opc = ARM::VMOVDneon;
    else if (DestRC == ARM::QPRRegisterClass)
      Opc = ARM::VMOVQ;
    else if (DestRC == ARM::QQPRRegisterClass)
      Opc = ARM::VMOVQQ;
    else
      return false;

    AddDefaultPred(BuildMI(MBB, I, DL, get(Opc), DestReg).addReg(SrcReg));
  }

  return true;
}

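// AddDReg appends one D-register operand to MIB: for a physical register the
// sub-register is resolved through TRI up front, while for a virtual register
// the sub-register index is attached to the operand itself.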
static const
MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB,
                             unsigned Reg, unsigned SubIdx, unsigned State,
                             const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
                            MachineMemOperand::MOStore, 0,
                            MFI.getObjectSize(FI),
                            Align);

  // tGPR is used sometimes in ARM instructions that need to avoid using
  // certain registers.  Just treat it as GPR here.
  if (RC == ARM::tGPRRegisterClass)
    RC = ARM::GPRRegisterClass;

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::SPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::DPRRegisterClass ||
             RC == ARM::DPR_VFP2RegisterClass ||
             RC == ARM::DPR_8RegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::QPRRegisterClass ||
             RC == ARM::QPR_VFP2RegisterClass ||
             RC == ARM::QPR_8RegisterClass) {
    // FIXME: Neon instructions should support predicates
    if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q))
                     .addFrameIndex(FI).addImm(128)
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addMemOperand(MMO));
    } else {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQ))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI)
                     .addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
                     .addMemOperand(MMO));
    }
  } else {
    assert((RC == ARM::QQPRRegisterClass ||
            RC == ARM::QQPR_VFP2RegisterClass ||
            RC == ARM::QQPR_8RegisterClass) && "Unknown regclass!");
    if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
      MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VST2q32))
        .addFrameIndex(FI).addImm(128);
      MIB = AddDReg(MIB, SrcReg, ARM::DSUBREG_0, getKillRegState(isKill), TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::DSUBREG_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::DSUBREG_2, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::DSUBREG_3, 0, TRI);
      AddDefaultPred(MIB.addMemOperand(MMO));
    } else {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMD))
                       .addFrameIndex(FI)
                       .addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4)))
        .addMemOperand(MMO);
      MIB = AddDReg(MIB, SrcReg, ARM::DSUBREG_0, getKillRegState(isKill), TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::DSUBREG_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::DSUBREG_2, 0, TRI);
            AddDReg(MIB, SrcReg, ARM::DSUBREG_3, 0, TRI);
    }
  }
}

void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
                            MachineMemOperand::MOLoad, 0,
                            MFI.getObjectSize(FI),
                            Align);

  // tGPR is used sometimes in ARM instructions that need to avoid using
  // certain registers.  Just treat it as GPR here.
  if (RC == ARM::tGPRRegisterClass)
    RC = ARM::GPRRegisterClass;

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
                   .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::SPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::DPRRegisterClass ||
             RC == ARM::DPR_VFP2RegisterClass ||
             RC == ARM::DPR_8RegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::QPRRegisterClass ||
             RC == ARM::QPR_VFP2RegisterClass ||
             RC == ARM::QPR_8RegisterClass) {
    if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q), DestReg)
                     .addFrameIndex(FI).addImm(128)
                     .addMemOperand(MMO));
    } else {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQ), DestReg)
                     .addFrameIndex(FI)
                     .addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
                     .addMemOperand(MMO));
    }
  } else {
    assert((RC == ARM::QQPRRegisterClass ||
            RC == ARM::QQPR_VFP2RegisterClass ||
            RC == ARM::QQPR_8RegisterClass) && "Unknown regclass!");
    if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
      MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLD2q32));
      MIB = AddDReg(MIB, DestReg, ARM::DSUBREG_0, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::DSUBREG_1, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::DSUBREG_2, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::DSUBREG_3, RegState::Define, TRI);
      AddDefaultPred(MIB.addFrameIndex(FI).addImm(128).addMemOperand(MMO));
    } else {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMD))
                       .addFrameIndex(FI)
                       .addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4)))
        .addMemOperand(MMO);
      MIB = AddDReg(MIB, DestReg, ARM::DSUBREG_0, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::DSUBREG_1, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::DSUBREG_2, RegState::Define, TRI);
            AddDReg(MIB, DestReg, ARM::DSUBREG_3, RegState::Define, TRI);
    }
  }
}

MachineInstr*
ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                           int FrameIx, uint64_t Offset,
                                           const MDNode *MDPtr,
                                           DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM::DBG_VALUE))
    .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
  return &*MIB;
}

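// Note on the folding below: when the single operand being spilled or
// reloaded belongs to a plain register-to-register move, the move can be
// replaced outright by a store of its source to the stack slot (operand 0
// folded) or a load of its destination from the slot (operand 1 folded),
// avoiding a separate copy. NULL is returned if no folding is possible.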
MachineInstr *ARMBaseInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = NULL;
  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
    // If it is updating CPSR, then it cannot be folded.
    if (MI->getOperand(4).getReg() == ARM::CPSR && !MI->getOperand(4).isDead())
      return NULL;
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      if (Opc == ARM::MOVr)
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      else // ARM::t2MOVr
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      if (Opc == ARM::MOVr)
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef), DstSubReg)
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      else // ARM::t2MOVr
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef), DstSubReg)
          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == ARM::tMOVgpr2gpr ||
             Opc == ARM::tMOVtgpr2gpr ||
             Opc == ARM::tMOVgpr2tgpr) {
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
        .addReg(SrcReg,
                getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
    }
  } else if (Opc == ARM::VMOVS) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRS))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRS))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == ARM::VMOVD || Opc == ARM::VMOVDneon) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRD))
        .addReg(SrcReg,
                getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRD))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  }  else if (Opc == ARM::VMOVQ) {
    MachineFrameInfo &MFI = *MF.getFrameInfo();
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      if (MFI.getObjectAlignment(FI) >= 16 &&
          getRegisterInfo().canRealignStack(MF)) {
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VST1q))
          .addFrameIndex(FI).addImm(128)
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addImm(Pred).addReg(PredReg);
      } else {
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTMQ))
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addFrameIndex(FI).addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
          .addImm(Pred).addReg(PredReg);
      }
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      if (MFI.getObjectAlignment(FI) >= 16 &&
          getRegisterInfo().canRealignStack(MF)) {
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLD1q))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef),
                  DstSubReg)
          .addFrameIndex(FI).addImm(128).addImm(Pred).addReg(PredReg);
      } else {
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDMQ))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef),
                  DstSubReg)
          .addFrameIndex(FI).addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
          .addImm(Pred).addReg(PredReg);
      }
    }
  }

  return NewMI;
}

MachineInstr*
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr* MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr* LoadMI) const {
  // FIXME
  return 0;
}

bool
ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                   const SmallVectorImpl<unsigned> &Ops) const {
  if (Ops.size() != 1) return false;

  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
    // If it is updating CPSR, then it cannot be folded.
    return MI->getOperand(4).getReg() != ARM::CPSR ||
      MI->getOperand(4).isDead();
  } else if (Opc == ARM::tMOVgpr2gpr ||
             Opc == ARM::tMOVtgpr2gpr ||
             Opc == ARM::tMOVgpr2tgpr) {
    return true;
  } else if (Opc == ARM::VMOVS || Opc == ARM::VMOVD ||
             Opc == ARM::VMOVDneon || Opc == ARM::VMOVQ) {
    return true;
  }

  return false;
}

/// Create a copy of a const pool value. Update CPI to the new index and return
/// the label UID.
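/// Each pc-relative constant pool load needs its own pc-label id, so when an
/// instruction that references such an entry is cloned (rematerialized or
/// duplicated below), the ARMConstantPoolValue must be copied with a fresh id
/// rather than shared.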
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
  MachineConstantPool *MCP = MF.getConstantPool();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");
  ARMConstantPoolValue *ACPV =
    static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);

  unsigned PCLabelId = AFI->createConstPoolEntryUId();
  ARMConstantPoolValue *NewCPV = 0;
  if (ACPV->isGlobalValue())
    NewCPV = new ARMConstantPoolValue(ACPV->getGV(), PCLabelId,
                                      ARMCP::CPValue, 4);
  else if (ACPV->isExtSymbol())
    NewCPV = new ARMConstantPoolValue(MF.getFunction()->getContext(),
                                      ACPV->getSymbol(), PCLabelId, 4);
  else if (ACPV->isBlockAddress())
    NewCPV = new ARMConstantPoolValue(ACPV->getBlockAddress(), PCLabelId,
                                      ARMCP::CPBlockAddress, 4);
  else
    llvm_unreachable("Unexpected ARM constantpool value type!!");
  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
  return PCLabelId;
}

void ARMBaseInstrInfo::
reMaterialize(MachineBasicBlock &MBB,
              MachineBasicBlock::iterator I,
              unsigned DestReg, unsigned SubIdx,
              const MachineInstr *Orig,
              const TargetRegisterInfo *TRI) const {
  if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
    DestReg = TRI->getSubReg(DestReg, SubIdx);
    SubIdx = 0;
  }

  unsigned Opcode = Orig->getOpcode();
  switch (Opcode) {
  default: {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->getOperand(0).setReg(DestReg);
    MBB.insert(I, MI);
    break;
  }
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    MachineFunction &MF = *MBB.getParent();
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
                                      DestReg)
      .addConstantPoolIndex(CPI).addImm(PCLabelId);
    (*MIB).setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
    break;
  }
  }

  MachineInstr *NewMI = prior(I);
  NewMI->getOperand(0).setSubReg(SubIdx);
}

MachineInstr *
ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
  MachineInstr *MI = TargetInstrInfoImpl::duplicate(Orig, MF);
  switch(Orig->getOpcode()) {
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    Orig->getOperand(1).setIndex(CPI);
    Orig->getOperand(2).setImm(PCLabelId);
    break;
  }
  }
  return MI;
}

bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
                                        const MachineInstr *MI1) const {
  int Opcode = MI0->getOpcode();
  if (Opcode == ARM::t2LDRpci ||
      Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci ||
      Opcode == ARM::tLDRpci_pic) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    const MachineOperand &MO0 = MI0->getOperand(1);
    const MachineOperand &MO1 = MI1->getOperand(1);
    if (MO0.getOffset() != MO1.getOffset())
      return false;

    const MachineFunction *MF = MI0->getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    ARMConstantPoolValue *ACPV0 =
      static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
    ARMConstantPoolValue *ACPV1 =
      static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
    return ACPV0->hasSameValue(ACPV1);
  }

  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
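///
/// Typical use (illustrative):
///   unsigned PredReg = 0;
///   ARMCC::CondCodes CC = llvm::getInstrPredicate(MI, PredReg);
///   if (CC != ARMCC::AL) { /* instruction is predicated */ }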
ARMCC::CondCodes
llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }

  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
}


int llvm::getMatchingCondBranchOpcode(int Opc) {
  if (Opc == ARM::B)
    return ARM::Bcc;
  else if (Opc == ARM::tB)
    return ARM::tBcc;
  else if (Opc == ARM::t2B)
    return ARM::t2Bcc;

  llvm_unreachable("Unknown unconditional branch opcode!");
  return 0;
}


void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                               unsigned DestReg, unsigned BaseReg, int NumBytes,
                               ARMCC::CondCodes Pred, unsigned PredReg,
                               const ARMBaseInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

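  // Illustrative example (not from the original source): NumBytes == 0x10004
  // (bit 16 plus bit 2) does not fit a single rotated 8-bit immediate, so the
  // loop below splits it into two encodable pieces (0x4 and 0x10000) and
  // emits one ADD/SUB for each, rebasing on DestReg after the first.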
  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
    BaseReg = DestReg;
  }
}

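// rewriteARMFrameIndex folds as much of Offset as the instruction's
// addressing mode can encode, rewriting the frame-index operand to FrameReg.
// It returns true only when the whole offset was absorbed; otherwise the
// unencodable remainder is left in Offset for the caller to materialize.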
bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const TargetInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
      return false;
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
      break;
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold address comp. if opcode has offset bits
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        if (isSub)
          ImmedOffset |= 1 << NumBits;
        ImmOp.ChangeToImmediate(ImmedOffset);
        Offset = 0;
        return true;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub)
        ImmedOffset |= 1 << NumBits;
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}