// ARMBaseInstrInfo.cpp, revision 7baae87d8f188262e07922348d88201f32514b1c
//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
    Subtarget(STI) {
}

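/// convertToThreeAddress - Split a pre/post-indexed load or store into an
/// un-indexed memory operation plus a separate ADD / SUB that updates the
/// base register. Returns NULL if the conversion is disabled or not possible
/// for this instruction.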
MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  unsigned TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned NumOps = TID.getNumOperands();
  bool isLoad = !TID.mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // The immediate is 8 bits; it's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() &&
          TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
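// AnalyzeBranch returns false (filling in TBB, FBB, and Cond) when the
// block's terminators are understood, and true when they cannot be analyzed.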
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  unsigned SecondLastOpc = SecondLastInst->getOpcode();
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them for
  // correctness of Thumb constant islands.
  if ((isJumpTableBranchOpcode(SecondLastOpc) ||
       isIndirectBranchOpcode(SecondLastOpc)) &&
      isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}


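/// RemoveBranch - Delete the branch instructions at the end of the block and
/// return how many were removed (0, 1, or 2).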
unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

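/// InsertBranch - Emit an unconditional, conditional, or two-way branch at
/// the end of MBB and return the number of instructions inserted.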
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                             const SmallVectorImpl<MachineOperand> &Cond) const {
  // FIXME this should probably have a DebugLoc argument
  DebugLoc dl = DebugLoc::getUnknownLoc();

  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc   = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

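/// PredicateInstruction - Convert MI to its predicated form using the
/// condition in Pred; returns true if the instruction could be predicated.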
bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

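/// SubsumesPredicate - Return true if the first predicate covers the second,
/// e.g. AL subsumes everything and GE subsumes GT.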
bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

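/// DefinesPredicate - Collect the CPSR operands of MI into Pred and return
/// true if any were found.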
bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  // FIXME: This confuses implicit_def with optional CPSR def.
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}


/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) DISABLE_INLINE;
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned TSFlags = TID.TSFlags;

  unsigned Opc = MI->getOpcode();
  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default: {
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
    if (MI->isLabel())
      return 0;
    switch (Opc) {
    default:
      llvm_unreachable("Unknown or unset size field for instr!");
    case TargetInstrInfo::IMPLICIT_DEF:
    case TargetInstrInfo::KILL:
    case TargetInstrInfo::DBG_LABEL:
    case TargetInstrInfo::EH_LABEL:
      return 0;
    }
    break;
  }
  case ARMII::Size8Bytes: return 8;          // ARM instruction x 2.
  case ARMII::Size4Bytes: return 4;          // ARM / Thumb2 instruction.
  case ARMII::Size2Bytes: return 2;          // Thumb1 instruction.
  case ARMII::SizeSpecial: {
    switch (Opc) {
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded as
      // operand #2.
      return MI->getOperand(2).getImm();
    case ARM::Int_eh_sjlj_setjmp:
      return 24;
    case ARM::t2Int_eh_sjlj_setjmp:
      return 20;
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::tBR_JTr:
    case ARM::t2BR_JT:
    case ARM::t2TBB:
    case ARM::t2TBH: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is 4 + 4 * number of entries. For TBB, each entry
      // is one byte; for TBH, two bytes each.
      unsigned EntrySize = (Opc == ARM::t2TBB)
        ? 1 : ((Opc == ARM::t2TBH) ? 2 : 4);
      unsigned NumOps = TID.getNumOperands();
      MachineOperand JTOP =
        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
      unsigned JTI = JTOP.getIndex();
      const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2-byte aligned, but JT entries are 4-byte
      // aligned. The assembler / linker may add 2 bytes of padding just
      // before the JT entries.  The size does not include this padding; the
      // constant islands pass does separate bookkeeping for it.
      // FIXME: If we know the size of the function is less than (1 << 16) * 2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
      unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
      unsigned NumEntries = getNumJTEntries(JT, JTI);
      if (Opc == ARM::t2TBB && (NumEntries & 1))
        // Make sure the instruction that follows TBB is 2-byte aligned.
        // FIXME: Constant island pass should insert an "ALIGN" instruction
        // instead.
        ++NumEntries;
      return NumEntries * EntrySize + InstSize;
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0; // Not reached
}

/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool
ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
                              unsigned &SrcReg, unsigned &DstReg,
                              unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
  SrcSubIdx = DstSubIdx = 0; // No sub-registers.

  switch (MI.getOpcode()) {
  default: break;
  case ARM::FCPYS:
  case ARM::FCPYD:
  case ARM::VMOVD:
  case ARM::VMOVQ: {
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
  case ARM::MOVr:
  case ARM::tMOVr:
  case ARM::tMOVgpr2tgpr:
  case ARM::tMOVtgpr2gpr:
  case ARM::tMOVgpr2gpr:
  case ARM::t2MOVr: {
    assert(MI.getDesc().getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "Invalid ARM MOV instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
  }

  return false;
}

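/// isLoadFromStackSlot - If MI is a direct load from a stack slot, return the
/// destination register and set FrameIndex; otherwise return 0.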
unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDR:
  case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2LDRi12:
  case ARM::tRestore:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FLDD:
  case ARM::FLDS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

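/// isStoreToStackSlot - If MI is a direct store to a stack slot, return the
/// source register and set FrameIndex; otherwise return 0.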
unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STR:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2STRi12:
  case ARM::tSpill:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FSTD:
  case ARM::FSTS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

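/// copyRegToReg - Emit a register-to-register copy from SrcReg to DestReg,
/// picking MOVr, FCPYS, FCPYD, or a NEON VMOV based on the register classes.
/// Returns false if the copy is not supported.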
bool
ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               unsigned DestReg, unsigned SrcReg,
                               const TargetRegisterClass *DestRC,
                               const TargetRegisterClass *SrcRC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (DestRC != SrcRC) {
    // Allow DPR / DPR_VFP2 / DPR_8 cross-class copies
    // Allow QPR / QPR_VFP2 cross-class copies
    if (DestRC == ARM::DPRRegisterClass) {
      if (SrcRC == ARM::DPR_VFP2RegisterClass ||
          SrcRC == ARM::DPR_8RegisterClass) {
      } else
        return false;
    } else if (DestRC == ARM::DPR_VFP2RegisterClass) {
      if (SrcRC == ARM::DPRRegisterClass ||
          SrcRC == ARM::DPR_8RegisterClass) {
      } else
        return false;
    } else if (DestRC == ARM::DPR_8RegisterClass) {
      if (SrcRC == ARM::DPRRegisterClass ||
          SrcRC == ARM::DPR_VFP2RegisterClass) {
      } else
        return false;
    } else if ((DestRC == ARM::QPRRegisterClass &&
                SrcRC == ARM::QPR_VFP2RegisterClass) ||
               (DestRC == ARM::QPR_VFP2RegisterClass &&
                SrcRC == ARM::QPRRegisterClass)) {
    } else
      return false;
  }

  if (DestRC == ARM::GPRRegisterClass) {
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr),
                                        DestReg).addReg(SrcReg)));
  } else if (DestRC == ARM::SPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYS), DestReg)
                   .addReg(SrcReg));
  } else if (DestRC == ARM::DPR_VFP2RegisterClass ||
             DestRC == ARM::DPR_8RegisterClass ||
             SrcRC == ARM::DPR_VFP2RegisterClass ||
             SrcRC == ARM::DPR_8RegisterClass) {
    // Always use neon reg-reg move if source or dest is NEON-only regclass.
    BuildMI(MBB, I, DL, get(ARM::VMOVD), DestReg).addReg(SrcReg);
  } else if (DestRC == ARM::DPRRegisterClass) {
    const ARMBaseRegisterInfo* TRI = &getRegisterInfo();

    // If we do not find an instruction defining the reg, the register should
    // be live-in to this BB. In that case it's always better to use NEON
    // reg-reg moves.
    unsigned Domain = ARMII::DomainNEON;

    // Find the Machine Instruction which defines SrcReg.
    if (!MBB.empty()) {
      MachineBasicBlock::iterator J = (I == MBB.begin() ? I : prior(I));
      while (J != MBB.begin()) {
        if (J->modifiesRegister(SrcReg, TRI))
          break;
        --J;
      }

      if (J->modifiesRegister(SrcReg, TRI)) {
        Domain = J->getDesc().TSFlags & ARMII::DomainMask;
        // Instructions in general domain are subreg accesses.
        // Map them to NEON reg-reg moves.
        if (Domain == ARMII::DomainGeneral)
          Domain = ARMII::DomainNEON;
      }
    }

    if ((Domain & ARMII::DomainNEON) && getSubtarget().hasNEON()) {
      BuildMI(MBB, I, DL, get(ARM::VMOVQ), DestReg).addReg(SrcReg);
    } else {
      assert((Domain & ARMII::DomainVFP ||
              !getSubtarget().hasNEON()) && "Invalid domain!");
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYD), DestReg)
                     .addReg(SrcReg));
    }
  } else if (DestRC == ARM::QPRRegisterClass ||
             DestRC == ARM::QPR_VFP2RegisterClass) {
    BuildMI(MBB, I, DL, get(ARM::VMOVQ), DestReg).addReg(SrcReg);
  } else {
    return false;
  }

  return true;
}

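/// storeRegToStackSlot - Spill SrcReg to the stack slot FI using the store
/// opcode appropriate for its register class.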
void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
                            MachineMemOperand::MOStore, 0,
                            MFI.getObjectSize(FI),
                            MFI.getObjectAlignment(FI));

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::DPRRegisterClass ||
             RC == ARM::DPR_VFP2RegisterClass ||
             RC == ARM::DPR_8RegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTD))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::SPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTS))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else {
    assert((RC == ARM::QPRRegisterClass ||
            RC == ARM::QPR_VFP2RegisterClass) && "Unknown regclass!");
    // FIXME: Neon instructions should support predicates
    BuildMI(MBB, I, DL, get(ARM::VSTRQ)).addReg(SrcReg, getKillRegState(isKill))
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  }
}

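/// loadRegFromStackSlot - Reload DestReg from the stack slot FI using the
/// load opcode appropriate for its register class.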
void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
                            MachineMemOperand::MOLoad, 0,
                            MFI.getObjectSize(FI),
                            MFI.getObjectAlignment(FI));

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
                   .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::DPRRegisterClass ||
             RC == ARM::DPR_VFP2RegisterClass ||
             RC == ARM::DPR_8RegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDD), DestReg)
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::SPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDS), DestReg)
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else {
    assert((RC == ARM::QPRRegisterClass ||
            RC == ARM::QPR_VFP2RegisterClass) && "Unknown regclass!");
    // FIXME: Neon instructions should support predicates
    BuildMI(MBB, I, DL, get(ARM::VLDRQ), DestReg).addFrameIndex(FI).addImm(0)
      .addMemOperand(MMO);
  }
}

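/// foldMemoryOperandImpl - Fold a register move into a load from / store to
/// the stack slot FI, returning the new instruction or null if the move
/// cannot be folded.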
MachineInstr *ARMBaseInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = NULL;
  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
    // If it is updating CPSR, then it cannot be folded.
    if (MI->getOperand(4).getReg() == ARM::CPSR && !MI->getOperand(4).isDead())
      return NULL;
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      if (Opc == ARM::MOVr)
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      else // ARM::t2MOVr
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      if (Opc == ARM::MOVr)
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef), DstSubReg)
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      else // ARM::t2MOVr
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef), DstSubReg)
          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == ARM::tMOVgpr2gpr ||
             Opc == ARM::tMOVtgpr2gpr ||
             Opc == ARM::tMOVgpr2tgpr) {
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
        .addReg(SrcReg,
                getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
    }
  } else if (Opc == ARM::FCPYS) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTS))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDS))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == ARM::FCPYD) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTD))
        .addReg(SrcReg,
                getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDD))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  }

  return NewMI;
}

MachineInstr*
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr* MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr* LoadMI) const {
  // FIXME
  return 0;
}

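/// canFoldMemoryOperand - Return true if the register move MI is one that
/// foldMemoryOperandImpl knows how to fold into a load or store.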
bool
ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                   const SmallVectorImpl<unsigned> &Ops) const {
  if (Ops.size() != 1) return false;

  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
    // If it is updating CPSR, then it cannot be folded.
    return MI->getOperand(4).getReg() != ARM::CPSR ||
      MI->getOperand(4).isDead();
  } else if (Opc == ARM::tMOVgpr2gpr ||
             Opc == ARM::tMOVtgpr2gpr ||
             Opc == ARM::tMOVgpr2tgpr) {
    return true;
  } else if (Opc == ARM::FCPYS || Opc == ARM::FCPYD) {
    return true;
  } else if (Opc == ARM::VMOVD || Opc == ARM::VMOVQ) {
    return false; // FIXME
  }

  return false;
}

/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
ARMCC::CondCodes
llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }

  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
}


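/// Map an unconditional branch opcode (B / tB / t2B) to its conditional form.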
int llvm::getMatchingCondBranchOpcode(int Opc) {
  if (Opc == ARM::B)
    return ARM::Bcc;
  else if (Opc == ARM::tB)
    return ARM::tBcc;
  else if (Opc == ARM::t2B)
    return ARM::t2Bcc;

  llvm_unreachable("Unknown unconditional branch opcode!");
  return 0;
}


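/// emitARMRegPlusImmediate - Materialize DestReg = BaseReg + NumBytes using a
/// sequence of predicable ADDri / SUBri instructions, splitting the constant
/// into rotated-immediate (so_imm) chunks.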
void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                               unsigned DestReg, unsigned BaseReg, int NumBytes,
                               ARMCC::CondCodes Pred, unsigned PredReg,
                               const ARMBaseInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
    BaseReg = DestReg;
  }
}

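/// rewriteARMFrameIndex - Rewrite the frame-index operand of MI to use
/// FrameReg plus Offset. Returns true if the offset was folded entirely into
/// the instruction; any residual offset is returned in Offset.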
bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const TargetInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode4:
      // Can't fold any offset even if it's zero.
      return false;
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
      break;
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold the address computation if the opcode has offset bits.
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        if (isSub)
          ImmedOffset |= 1 << NumBits;
        ImmOp.ChangeToImmediate(ImmedOffset);
        Offset = 0;
        return true;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub)
        ImmedOffset |= 1 << NumBits;
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}
