//===-- ThumbRegisterInfo.cpp - Thumb-1 Register Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-1 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "ThumbRegisterInfo.h"
#include "ARMBaseInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"

namespace llvm {
extern cl::opt<bool> ReuseFrameIndexVals;
}

using namespace llvm;

ThumbRegisterInfo::ThumbRegisterInfo() : ARMBaseRegisterInfo() {}

const TargetRegisterClass *
ThumbRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &MF) const {
  if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
    return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);

  if (ARM::tGPRRegClass.hasSubClassEq(RC))
    return &ARM::tGPRRegClass;
  return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);
}

const TargetRegisterClass *
ThumbRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                      unsigned Kind) const {
  if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
    return ARMBaseRegisterInfo::getPointerRegClass(MF, Kind);
  return &ARM::tGPRRegClass;
}

static void emitThumb1LoadConstPool(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    DebugLoc dl, unsigned DestReg,
                                    unsigned SubIdx, int Val,
                                    ARMCC::CondCodes Pred, unsigned PredReg,
                                    unsigned MIFlags) {
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C = ConstantInt::get(
      Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci))
    .addReg(DestReg, getDefRegState(true), SubIdx)
    .addConstantPoolIndex(Idx).addImm(Pred).addReg(PredReg)
    .setMIFlags(MIFlags);
}

static void emitThumb2LoadConstPool(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    DebugLoc dl, unsigned DestReg,
                                    unsigned SubIdx, int Val,
                                    ARMCC::CondCodes Pred, unsigned PredReg,
                                    unsigned MIFlags) {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C = ConstantInt::get(
      Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci))
    .addReg(DestReg, getDefRegState(true), SubIdx)
    .addConstantPoolIndex(Idx).addImm((int64_t)ARMCC::AL).addReg(0)
    .setMIFlags(MIFlags);
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
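/// For example (illustrative), materializing Val = 0x12345678 into r0 creates
/// a constant-pool entry holding that value and emits a pc-relative literal
/// load of it (tLDRpci on Thumb1, t2LDRpci on Thumb2), roughly
/// "ldr r0, .LCPI0_0".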
void ThumbRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, DebugLoc dl,
    unsigned DestReg, unsigned SubIdx, int Val, ARMCC::CondCodes Pred,
    unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (STI.isThumb1Only()) {
    assert((isARMLowRegister(DestReg) || isVirtualRegister(DestReg)) &&
           "Thumb1 does not have ldr to high register");
    return emitThumb1LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
                                   PredReg, MIFlags);
  }
  return emitThumb2LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
                                 PredReg, MIFlags);
}

/// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
/// in a register using mov / mvn sequences or load the immediate from a
/// constpool entry.
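/// For example (illustrative), DestReg = r0, BaseReg = r1, NumBytes = -200
/// with CanChangeCC lowers to "movs r0, #200; subs r0, r1, r0" (the negation
/// is folded into the subtract); immediates that cannot be built this way
/// fall back to a constant-pool load.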
static
void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator &MBBI,
                              DebugLoc dl,
                              unsigned DestReg, unsigned BaseReg,
                              int NumBytes, bool CanChangeCC,
                              const TargetInstrInfo &TII,
                              const ARMBaseRegisterInfo& MRI,
                              unsigned MIFlags = MachineInstr::NoFlags) {
    MachineFunction &MF = *MBB.getParent();
    bool isHigh = !isARMLowRegister(DestReg) ||
                  (BaseReg != 0 && !isARMLowRegister(BaseReg));
    bool isSub = false;
    // Subtract doesn't have a high register version. Load the negative value
    // if either the base or the dest register is a high register. Also, do
    // not issue the sub as part of the sequence if the condition register
    // needs to be preserved.
    if (NumBytes < 0 && !isHigh && CanChangeCC) {
      isSub = true;
      NumBytes = -NumBytes;
    }
    unsigned LdReg = DestReg;
    if (DestReg == ARM::SP)
      assert(BaseReg == ARM::SP && "Unexpected!");
    if (!isARMLowRegister(DestReg) && !MRI.isVirtualRegister(DestReg))
      LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);

    if (NumBytes <= 255 && NumBytes >= 0 && CanChangeCC) {
      AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
        .addImm(NumBytes).setMIFlags(MIFlags);
    } else if (NumBytes < 0 && NumBytes >= -255 && CanChangeCC) {
      AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
        .addImm(NumBytes).setMIFlags(MIFlags);
      AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tRSB), LdReg))
        .addReg(LdReg, RegState::Kill).setMIFlags(MIFlags);
    } else
      MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes,
                            ARMCC::AL, 0, MIFlags);

    // Emit add / sub.
    int Opc = (isSub) ? ARM::tSUBrr : ((isHigh || !CanChangeCC) ? ARM::tADDhirr
                                                                : ARM::tADDrr);
    MachineInstrBuilder MIB =
      BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
    if (Opc != ARM::tADDhirr)
      MIB = AddDefaultT1CC(MIB);
    if (DestReg == ARM::SP || isSub)
      MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
    else
      MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
    AddDefaultPred(MIB);
}

/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Tries a series of ADDs or
/// SUBs first, and uses a constant pool value if the instruction sequence
/// would be too long. This is allowed to modify the condition flags.
void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     DebugLoc dl,
                                     unsigned DestReg, unsigned BaseReg,
                                     int NumBytes, const TargetInstrInfo &TII,
                                     const ARMBaseRegisterInfo& MRI,
                                     unsigned MIFlags) {
  bool isSub = NumBytes < 0;
  unsigned Bytes = (unsigned)NumBytes;
  if (isSub) Bytes = -NumBytes;

  int CopyOpc = 0;
  unsigned CopyBits = 0;
  unsigned CopyScale = 1;
  bool CopyNeedsCC = false;
  int ExtraOpc = 0;
  unsigned ExtraBits = 0;
  unsigned ExtraScale = 1;
  bool ExtraNeedsCC = false;

  // Strategy:
  // We need to select two types of instruction, maximizing the available
  // immediate range of each. The instructions we use will depend on whether
  // DestReg and BaseReg are low, high or the stack pointer.
  // * CopyOpc  - DestReg = BaseReg + imm
  //              This will be emitted once if DestReg != BaseReg, and never if
  //              DestReg == BaseReg.
  // * ExtraOpc - DestReg = DestReg + imm
  //              This will be emitted as many times as necessary to add the
  //              full immediate.
  // If the immediate ranges of these instructions are not large enough to
  // cover NumBytes with a reasonable number of instructions, we fall back to
  // using a value loaded from a constant pool.
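  // For example (illustrative): DestReg = r0 (low), BaseReg = sp and
  // NumBytes = 1024 picks CopyOpc = tADDrSPi (range ((1 << 8) - 1) * 4 = 1020
  // bytes) plus one ExtraOpc = tADDi8 for the remaining 4 bytes, so
  // RequiredInstrs is 2 and "add r0, sp, #1020; adds r0, #4" is emitted
  // directly rather than via the constant pool.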
  if (DestReg == ARM::SP) {
    if (BaseReg == ARM::SP) {
      // sp -> sp
      // Already in right reg, no copy needed
    } else {
      // low -> sp or high -> sp
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = isSub ? ARM::tSUBspi : ARM::tADDspi;
    ExtraBits = 7;
    ExtraScale = 4;
  } else if (isARMLowRegister(DestReg)) {
    if (BaseReg == ARM::SP) {
      // sp -> low
      assert(!isSub && "Thumb1 does not have tSUBrSPi");
      CopyOpc = ARM::tADDrSPi;
      CopyBits = 8;
      CopyScale = 4;
    } else if (DestReg == BaseReg) {
      // low -> same low
      // Already in right reg, no copy needed
    } else if (isARMLowRegister(BaseReg)) {
      // low -> different low
      CopyOpc = isSub ? ARM::tSUBi3 : ARM::tADDi3;
      CopyBits = 3;
      CopyNeedsCC = true;
    } else {
      // high -> low
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
    ExtraBits = 8;
    ExtraNeedsCC = true;
  } else /* DestReg is high */ {
    if (DestReg == BaseReg) {
      // high -> same high
      // Already in right reg, no copy needed
    } else {
      // {low,high,sp} -> high
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = 0;
  }

  // We could handle an unaligned immediate with an unaligned copy instruction
  // and an aligned extra instruction, but this case is not currently needed.
  assert(((Bytes & 3) == 0 || ExtraScale == 1) &&
         "Unaligned offset, but all instructions require alignment");

  unsigned CopyRange = ((1 << CopyBits) - 1) * CopyScale;
  // If we would emit the copy with an immediate of 0, just use tMOVr.
  if (CopyOpc && Bytes < CopyScale) {
    CopyOpc = ARM::tMOVr;
    CopyScale = 1;
    CopyNeedsCC = false;
    CopyRange = 0;
  }
  unsigned ExtraRange = ((1 << ExtraBits) - 1) * ExtraScale; // per instruction
  unsigned RequiredCopyInstrs = CopyOpc ? 1 : 0;
  unsigned RangeAfterCopy = (CopyRange > Bytes) ? 0 : (Bytes - CopyRange);

  // We could handle this case when the copy instruction does not require an
  // aligned immediate, but we do not currently do this.
  assert(RangeAfterCopy % ExtraScale == 0 &&
         "Extra instruction requires immediate to be aligned");

  unsigned RequiredExtraInstrs;
  if (ExtraRange)
    RequiredExtraInstrs =
        RoundUpToAlignment(RangeAfterCopy, ExtraRange) / ExtraRange;
  else if (RangeAfterCopy > 0)
    // We need an extra instruction but none is available
    RequiredExtraInstrs = 1000000;
  else
    RequiredExtraInstrs = 0;
  unsigned RequiredInstrs = RequiredCopyInstrs + RequiredExtraInstrs;
  unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;

  // Use a constant pool, if the sequence of ADDs/SUBs is too expensive.
  if (RequiredInstrs > Threshold) {
    emitThumbRegPlusImmInReg(MBB, MBBI, dl,
                             DestReg, BaseReg, NumBytes, true,
                             TII, MRI, MIFlags);
    return;
  }

  // Emit zero or one copy instructions
  if (CopyOpc) {
    unsigned CopyImm = std::min(Bytes, CopyRange) / CopyScale;
    Bytes -= CopyImm * CopyScale;

    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(CopyOpc), DestReg);
    if (CopyNeedsCC)
      MIB = AddDefaultT1CC(MIB);
    MIB.addReg(BaseReg, RegState::Kill);
    if (CopyOpc != ARM::tMOVr) {
      MIB.addImm(CopyImm);
    }
    AddDefaultPred(MIB.setMIFlags(MIFlags));

    BaseReg = DestReg;
  }

  // Emit zero or more in-place add/sub instructions
  while (Bytes) {
    unsigned ExtraImm = std::min(Bytes, ExtraRange) / ExtraScale;
    Bytes -= ExtraImm * ExtraScale;

    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, dl, TII.get(ExtraOpc), DestReg);
    if (ExtraNeedsCC)
      MIB = AddDefaultT1CC(MIB);
    MIB.addReg(BaseReg).addImm(ExtraImm);
    MIB = AddDefaultPred(MIB);
    MIB.setMIFlags(MIFlags);
  }
}

static void removeOperands(MachineInstr &MI, unsigned i) {
  unsigned Op = i;
  for (unsigned e = MI.getNumOperands(); i != e; ++i)
    MI.RemoveOperand(Op);
}

/// convertToNonSPOpcode - Change the opcode to the non-SP version, because
/// we're replacing the frame index with a non-SP register.
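/// For example (illustrative), once a frame index has been resolved to the
/// frame pointer, a spill emitted as tSTRspi ("str r0, [sp, #8]") is converted
/// to tSTRi ("str r0, [r7, #8]"); opcodes with no SP-specific form are
/// returned unchanged.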
static unsigned convertToNonSPOpcode(unsigned Opcode) {
  switch (Opcode) {
  case ARM::tLDRspi:
    return ARM::tLDRi;

  case ARM::tSTRspi:
    return ARM::tSTRi;
  }

  return Opcode;
}

bool ThumbRegisterInfo::rewriteFrameIndex(MachineBasicBlock::iterator II,
                                          unsigned FrameRegIdx,
                                          unsigned FrameReg, int &Offset,
                                          const ARMBaseInstrInfo &TII) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  assert(MBB.getParent()->getSubtarget<ARMSubtarget>().isThumb1Only() &&
         "This isn't needed for thumb2!");
  DebugLoc dl = MI.getDebugLoc();
  MachineInstrBuilder MIB(*MBB.getParent(), &MI);
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);

  if (Opcode == ARM::tADDframe) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    unsigned DestReg = MI.getOperand(0).getReg();

    emitThumbRegPlusImmediate(MBB, II, dl, DestReg, FrameReg, Offset, TII,
                              *this);
    MBB.erase(II);
    return true;
  } else {
    if (AddrMode != ARMII::AddrModeT1_s)
      llvm_unreachable("Unsupported addressing mode!");

    unsigned ImmIdx = FrameRegIdx + 1;
    int InstrOffs = MI.getOperand(ImmIdx).getImm();
    unsigned NumBits = (FrameReg == ARM::SP) ? 8 : 5;
    unsigned Scale = 4;

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale - 1)) == 0 && "Can't encode this offset!");

    // Common case: small offset, fits into instruction.
    MachineOperand &ImmOp = MI.getOperand(ImmIdx);
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;

    if ((unsigned)Offset <= Mask * Scale) {
      // Replace the FrameIndex with the frame register (e.g., sp).
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      ImmOp.ChangeToImmediate(ImmedOffset);

      // If we're using a register where sp was stored, convert the instruction
      // to the non-SP version.
      unsigned NewOpc = convertToNonSPOpcode(Opcode);
      if (NewOpc != Opcode && FrameReg != ARM::SP)
        MI.setDesc(TII.get(NewOpc));

      return true;
    }

    NumBits = 5;
    Mask = (1 << NumBits) - 1;

    // If this is a thumb spill / restore, we will be using a constpool load to
    // materialize the offset.
    if (Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) {
      ImmOp.ChangeToImmediate(0);
    } else {
      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask * Scale);
    }
  }

  return Offset == 0;
}

void ThumbRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                          int64_t Offset) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::resolveFrameIndex(MI, BaseReg, Offset);

  const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = rewriteFrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

/// saveScavengerRegister - Spill the register so it can be used by the
/// register scavenger. Return true.
bool ThumbRegisterInfo::saveScavengerRegister(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator &UseMI, const TargetRegisterClass *RC,
    unsigned Reg) const {

  const ARMSubtarget &STI = MBB.getParent()->getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::saveScavengerRegister(MBB, I, UseMI, RC, Reg);

  // Thumb1 can't use the emergency spill slot on the stack because
  // ldr/str immediate offsets must be positive, and if we're referencing
  // off the frame pointer (if, for example, there are alloca() calls in
  // the function), the offset will be negative. Use R12 instead, since it is
  // a call-clobbered register that we know won't be used in Thumb1 mode.
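  // In effect (illustrative) the sequence is "mov r12, <Reg>" here and
  // "mov <Reg>, r12" at (or before) UseMI; the loop below moves the restore
  // point earlier if any intervening instruction uses or clobbers R12.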
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL;
  AddDefaultPred(BuildMI(MBB, I, DL, TII.get(ARM::tMOVr))
    .addReg(ARM::R12, RegState::Define)
    .addReg(Reg, RegState::Kill));

  // The UseMI is where we would like to restore the register. If there's
  // interference with R12 before then, however, we'll need to restore it
  // before that instead and adjust the UseMI.
  bool done = false;
  for (MachineBasicBlock::iterator II = I; !done && II != UseMI; ++II) {
    if (II->isDebugValue())
      continue;
    // If this instruction affects R12, adjust our restore point.
    for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = II->getOperand(i);
      if (MO.isRegMask() && MO.clobbersPhysReg(ARM::R12)) {
        UseMI = II;
        done = true;
        break;
      }
      if (!MO.isReg() || MO.isUndef() || !MO.getReg() ||
          TargetRegisterInfo::isVirtualRegister(MO.getReg()))
        continue;
      if (MO.getReg() == ARM::R12) {
        UseMI = II;
        done = true;
        break;
      }
    }
  }
  // Restore the register from R12
  AddDefaultPred(BuildMI(MBB, UseMI, DL, TII.get(ARM::tMOVr)).
    addReg(Reg, RegState::Define).addReg(ARM::R12, RegState::Kill));

  return true;
}

void ThumbRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::eliminateFrameIndex(II, SPAdj, FIOperandNum,
                                                    RS);

  unsigned VReg = 0;
  const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  DebugLoc dl = MI.getDebugLoc();
  MachineInstrBuilder MIB(*MBB.getParent(), &MI);

  unsigned FrameReg = ARM::SP;
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
               MF.getFrameInfo()->getStackSize() + SPAdj;

  if (MF.getFrameInfo()->hasVarSizedObjects()) {
    assert(SPAdj == 0 && STI.getFrameLowering()->hasFP(MF) && "Unexpected");
    // There are alloca()'s in this function, must reference off the frame
    // pointer or base pointer instead.
    if (!hasBasePointer(MF)) {
      FrameReg = getFrameRegister(MF);
      Offset -= AFI->getFramePtrSpillOffset();
    } else
      FrameReg = BasePtr;
  }

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated.  That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(STI.getFrameLowering()->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo()->hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(FIOperandNum).  ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  assert(AFI->isThumbFunction() &&
         "This eliminateFrameIndex only supports Thumb1!");
  if (rewriteFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return;

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert(Offset && "This code isn't needed if offset already handled!");

  unsigned Opcode = MI.getOpcode();

  // Remove predicate first.
  int PIdx = MI.findFirstPredOperandIdx();
  if (PIdx != -1)
    removeOperands(MI, PIdx);

  if (MI.mayLoad()) {
    // Use the destination register to materialize sp + offset.
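    // E.g. (illustrative) a tLDRspi whose offset did not fit becomes
    // "<materialize DestReg = sp + Offset>; ldr DestReg, [DestReg]" (tLDRi),
    // or, when the base is not sp, the offset is loaded from a constant pool
    // and a register-register load (tLDRr) is used instead.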
    unsigned TmpReg = MI.getOperand(0).getReg();
    bool UseRR = false;
    if (Opcode == ARM::tLDRspi) {
      if (FrameReg == ARM::SP)
        emitThumbRegPlusImmInReg(MBB, II, dl, TmpReg, FrameReg,
                                 Offset, false, TII, *this);
      else {
        emitLoadConstPool(MBB, II, dl, TmpReg, 0, Offset);
        UseRR = true;
      }
    } else {
      emitThumbRegPlusImmediate(MBB, II, dl, TmpReg, FrameReg, Offset, TII,
                                *this);
    }

    MI.setDesc(TII.get(UseRR ? ARM::tLDRr : ARM::tLDRi));
    MI.getOperand(FIOperandNum).ChangeToRegister(TmpReg, false, false, true);
    if (UseRR)
      // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
      // register. The offset is already handled in the vreg value.
      MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                     false);
  } else if (MI.mayStore()) {
      VReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
      bool UseRR = false;

      if (Opcode == ARM::tSTRspi) {
        if (FrameReg == ARM::SP)
          emitThumbRegPlusImmInReg(MBB, II, dl, VReg, FrameReg,
                                   Offset, false, TII, *this);
        else {
          emitLoadConstPool(MBB, II, dl, VReg, 0, Offset);
          UseRR = true;
        }
      } else
        emitThumbRegPlusImmediate(MBB, II, dl, VReg, FrameReg, Offset, TII,
                                  *this);
      MI.setDesc(TII.get(UseRR ? ARM::tSTRr : ARM::tSTRi));
      MI.getOperand(FIOperandNum).ChangeToRegister(VReg, false, false, true);
      if (UseRR)
        // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
        // register. The offset is already handled in the vreg value.
        MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                       false);
  } else {
    llvm_unreachable("Unexpected opcode!");
  }

  // Add predicate back if it's needed.
  if (MI.isPredicable())
    AddDefaultPred(MIB);
}