ARMBaseRegisterInfo.cpp revision 397fc4874efe9c17e737d4c5c50bd19dc3bf27f5
//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CommandLine.h"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

static cl::opt<bool>
ForceAllBaseRegAlloc("arm-force-base-reg-alloc", cl::Hidden, cl::init(false),
          cl::desc("Force use of virtual base registers for stack load/store"));
static cl::opt<bool>
EnableLocalStackAlloc("enable-local-stack-alloc", cl::init(true), cl::Hidden,
          cl::desc("Enable pre-regalloc stack frame index allocation"));
static cl::opt<bool>
EnableBasePointer("arm-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
                                         const ARMSubtarget &sti)
  : ARMGenRegisterInfo(ARM::LR), TII(tii), STI(sti),
    FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
    BasePtr(ARM::R6) {
}

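// The callee-saved register lists and call-preserved masks below come from
// TableGen (see ARMGenRegisterInfo.inc).  iOS targets that are not using the
// AAPCS ABI get their own variant; all other targets use the standard AAPCS
// set.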
const uint16_t*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  return (STI.isTargetIOS() && !STI.isAAPCS_ABI())
    ? CSR_iOS_SaveList : CSR_AAPCS_SaveList;
}

const uint32_t*
ARMBaseRegisterInfo::getCallPreservedMask(CallingConv::ID) const {
  return (STI.isTargetIOS() && !STI.isAAPCS_ABI())
    ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

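/// getReservedRegs - Build the set of registers the allocator must never use:
/// SP, PC, and FPSCR always; the frame pointer and base pointer when the
/// current function needs them; R9 on subtargets that reserve it; and D16-D31
/// when the subtarget does not implement them.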
BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  Reserved.set(ARM::SP);
  Reserved.set(ARM::PC);
  Reserved.set(ARM::FPSCR);
  if (TFI->hasFP(MF))
    Reserved.set(FramePtr);
  if (hasBasePointer(MF))
    Reserved.set(BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    Reserved.set(ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasVFP3() || STI.hasD16()) {
    assert(ARM::D31 == ARM::D16 + 15);
    for (unsigned i = 0; i != 16; ++i)
      Reserved.set(ARM::D16 + i);
  }
  return Reserved;
}

bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
                                        unsigned Reg) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  switch (Reg) {
  default: break;
  case ARM::SP:
  case ARM::PC:
    return true;
  case ARM::R6:
    if (hasBasePointer(MF))
      return true;
    break;
  case ARM::R7:
  case ARM::R11:
    if (FramePtr == Reg && TFI->hasFP(MF))
      return true;
    break;
  case ARM::R9:
    return STI.isR9Reserved();
  }

  return false;
}

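/// canCombineSubRegIndices - Return true if the given list of sub-register
/// indices (all relative to a register of class RC) together cover a single
/// larger register, and set NewSubIdx to the index of that register within RC
/// (0 means the whole register).  For example, {dsub_0, dsub_1} of a 128-bit
/// class combine into the whole Q register, while {dsub_4, dsub_5, dsub_6,
/// dsub_7} of a 512-bit class combine into qqsub_1.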
bool
ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
                                          SmallVectorImpl<unsigned> &SubIndices,
                                          unsigned &NewSubIdx) const {

  unsigned Size = RC->getSize() * 8;
  if (Size < 6)
    return 0;

  NewSubIdx = 0;  // Whole register.
  unsigned NumRegs = SubIndices.size();
  if (NumRegs == 8) {
    // 8 D registers -> 1 QQQQ register.
    return (Size == 512 &&
            SubIndices[0] == ARM::dsub_0 &&
            SubIndices[1] == ARM::dsub_1 &&
            SubIndices[2] == ARM::dsub_2 &&
            SubIndices[3] == ARM::dsub_3 &&
            SubIndices[4] == ARM::dsub_4 &&
            SubIndices[5] == ARM::dsub_5 &&
            SubIndices[6] == ARM::dsub_6 &&
            SubIndices[7] == ARM::dsub_7);
  } else if (NumRegs == 4) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 4 Q registers -> 1 QQQQ register.
      return (Size == 512 &&
              SubIndices[1] == ARM::qsub_1 &&
              SubIndices[2] == ARM::qsub_2 &&
              SubIndices[3] == ARM::qsub_3);
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 4 D registers -> 1 QQ register.
      if (Size >= 256 &&
          SubIndices[1] == ARM::dsub_1 &&
          SubIndices[2] == ARM::dsub_2 &&
          SubIndices[3] == ARM::dsub_3) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 4 D registers -> 1 QQ register (2nd).
      if (Size == 512 &&
          SubIndices[1] == ARM::dsub_5 &&
          SubIndices[2] == ARM::dsub_6 &&
          SubIndices[3] == ARM::dsub_7) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 4 S registers -> 1 Q register.
      if (Size >= 128 &&
          SubIndices[1] == ARM::ssub_1 &&
          SubIndices[2] == ARM::ssub_2 &&
          SubIndices[3] == ARM::ssub_3) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    }
  } else if (NumRegs == 2) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 2 Q registers -> 1 QQ register.
      if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::qsub_2) {
      // 2 Q registers -> 1 QQ register (2nd).
      if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 2 D registers -> 1 Q register.
      if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_2) {
      // 2 D registers -> 1 Q register (2nd).
      if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
        NewSubIdx = ARM::qsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 2 D registers -> 1 Q register (3rd).
      if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
        NewSubIdx = ARM::qsub_2;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_6) {
      // 2 D registers -> 1 Q register (4th).
      if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
        NewSubIdx = ARM::qsub_3;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 2 S registers -> 1 D register.
      if (SubIndices[1] == ARM::ssub_1) {
        if (Size >= 128)
          NewSubIdx = ARM::dsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_2) {
      // 2 S registers -> 1 D register (2nd).
      if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
        NewSubIdx = ARM::dsub_1;
        return true;
      }
    }
  }
  return false;
}

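/// getLargestLegalSuperClass - Walk up the super-class chain of RC and return
/// the first class that is one of the "representative" ARM classes (GPR, SPR,
/// DPR, QPR, QQPR, or QQQQPR); if none is found, RC itself is returned.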
const TargetRegisterClass*
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC)
                                                                         const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
                                                                         const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return 0;  // Can't copy CCR registers.
  return RC;
}

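/// getRegPressureLimit - Rough per-class register pressure limits used by the
/// scheduler and pressure trackers.  The constants below appear to bake in the
/// registers that are never allocatable (SP, PC, FP, R9, reserved D regs)
/// rather than deriving them from the reserved set, so treat them as heuristic
/// estimates rather than exact counts.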
unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

/// getRawAllocationOrder - Returns the register allocation order for a
/// specified register class with a target-dependent hint.
ArrayRef<uint16_t>
ARMBaseRegisterInfo::getRawAllocationOrder(const TargetRegisterClass *RC,
                                           unsigned HintType, unsigned HintReg,
                                           const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  // Alternative register allocation orders when favoring even / odd registers
  // of register pairs.

  // No FP, R9 is available.
  static const uint16_t GPREven1[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9, ARM::R11
  };
  static const uint16_t GPROdd1[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R7, R9 is available.
  static const uint16_t GPREven2[] = {
    ARM::R0, ARM::R2, ARM::R4,          ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
    ARM::R9, ARM::R11
  };
  static const uint16_t GPROdd2[] = {
    ARM::R1, ARM::R3, ARM::R5,          ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R11, R9 is available.
  static const uint16_t GPREven3[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9
  };
  static const uint16_t GPROdd3[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
    ARM::R8
  };

  // No FP, R9 is not available.
  static const uint16_t GPREven4[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6,          ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
    ARM::R11
  };
  static const uint16_t GPROdd4[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7,          ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R7, R9 is not available.
  static const uint16_t GPREven5[] = {
    ARM::R0, ARM::R2, ARM::R4,                   ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
    ARM::R11
  };
  static const uint16_t GPROdd5[] = {
    ARM::R1, ARM::R3, ARM::R5,                   ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R11, R9 is not available.
  static const uint16_t GPREven6[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
  };
  static const uint16_t GPROdd6[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
  };

  // We only support even/odd hints for GPR and rGPR.
  if (RC != &ARM::GPRRegClass && RC != &ARM::rGPRRegClass)
    return RC->getRawAllocationOrder(MF);

  if (HintType == ARMRI::RegPairEven) {
    if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return RC->getRawAllocationOrder(MF);

    if (!TFI->hasFP(MF)) {
      if (!STI.isR9Reserved())
        return makeArrayRef(GPREven1);
      else
        return makeArrayRef(GPREven4);
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return makeArrayRef(GPREven2);
      else
        return makeArrayRef(GPREven5);
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return makeArrayRef(GPREven3);
      else
        return makeArrayRef(GPREven6);
    }
  } else if (HintType == ARMRI::RegPairOdd) {
    if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return RC->getRawAllocationOrder(MF);

    if (!TFI->hasFP(MF)) {
      if (!STI.isR9Reserved())
        return makeArrayRef(GPROdd1);
      else
        return makeArrayRef(GPROdd4);
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return makeArrayRef(GPROdd2);
      else
        return makeArrayRef(GPROdd5);
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return makeArrayRef(GPROdd3);
      else
        return makeArrayRef(GPROdd6);
    }
  }
  return RC->getRawAllocationOrder(MF);
}

/// ResolveRegAllocHint - Resolves the specified register allocation hint
/// to a physical register. Returns the physical register if it is successful.
unsigned
ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
                                         const MachineFunction &MF) const {
  if (Reg == 0 || !isPhysicalRegister(Reg))
    return 0;
  if (Type == 0)
    return Reg;
  else if (Type == (unsigned)ARMRI::RegPairOdd)
    // Odd register.
    return getRegisterPairOdd(Reg, MF);
  else if (Type == (unsigned)ARMRI::RegPairEven)
    // Even register.
    return getRegisterPairEven(Reg, MF);
  return 0;
}

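/// UpdateRegAllocHint - Called when the register Reg has been replaced by
/// NewReg (for example by coalescing).  If Reg carried an even/odd pair hint,
/// repoint the partner register's hint at NewReg so the pairing survives the
/// replacement.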
void
ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      TargetRegisterInfo::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one of the even / odd register pair and it has now been
    // changed (e.g. coalesced) into a different register, the other register
    // of the pair's allocation hint must be updated to reflect the
    // relationship change.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    if (Hint.second == Reg)
      // Make sure the pair has not already divorced.
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
  }
}

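/// avoidWriteAfterWrite - Return true if it is better not to reuse a register
/// of the given class as the destination of closely spaced writes.  Only the
/// VFP/NEON S, D, and Q classes are affected, and only on Cortex-A9, which has
/// a write-after-write hazard on these registers.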
bool
ARMBaseRegisterInfo::avoidWriteAfterWrite(const TargetRegisterClass *RC) const {
  // CortexA9 has a Write-after-write hazard for NEON registers.
  if (!STI.isCortexA9())
    return false;

  switch (RC->getID()) {
  case ARM::DPRRegClassID:
  case ARM::DPR_8RegClassID:
  case ARM::DPR_VFP2RegClassID:
  case ARM::QPRRegClassID:
  case ARM::QPR_8RegClassID:
  case ARM::QPR_VFP2RegClassID:
  case ARM::SPRRegClassID:
  case ARM::SPR_8RegClassID:
    // Avoid reusing S, D, and Q registers.
    // Don't increase register pressure for QQ and QQQQ.
    return true;
  default:
    return false;
  }
}

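/// hasBasePointer - Return true if this function needs a dedicated base
/// pointer register (R6).  That is the case when the stack must be realigned
/// but the call frame is not reserved (so SP cannot reach the emergency spill
/// slot), or when a Thumb function has variable sized objects and its locals
/// are likely to be out of range of FP-relative addressing.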
bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (!EnableBasePointer)
    return false;

  // When outgoing call frames are so large that we adjust the stack pointer
  // around the call, we can no longer use the stack pointer to reach the
  // emergency spill slot.
  if (needsStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 allows positive offsets only.
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach the locals. If a function has a
    // smallish frame, it's less likely to have lots of spills and callee-saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If the estimate is wrong, the scavenger will still make the access work;
    // it just won't be optimal.
    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
      return false;
    return true;
  }

  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
  // 3. There are VLAs in the function and the base pointer is disabled.
  if (!MF.getTarget().Options.RealignStack)
    return false;
  if (AFI->isThumb1OnlyFunction())
    return false;
  // Stack realignment requires a frame pointer.  If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (MF.getTarget().getFrameLowering()->hasReservedCallFrame(MF))
    return true;
  if (!EnableBasePointer)
    return false;
  // A base pointer is required and allowed.  Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                               F->hasFnAttr(Attribute::StackAlignment));

  return requiresRealignment && canRealignStack(MF);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI->adjustsStack())
    return true;
  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
    || needsStackRealignment(MF);
}

unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (TFI->hasFP(MF))
    return FramePtr;
  return ARM::SP;
}

unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
}

unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
}

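// getRegisterPairEven / getRegisterPairOdd - Given one register of an even/odd
// GPR, SPR, or DPR pair, return its partner, or 0 when the pair cannot be used
// because one of its registers is special or reserved in this function.  These
// back the even/odd allocation hints resolved above.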
unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
                                              const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R1: return ARM::R0;
  case ARM::R3: return ARM::R2;
  case ARM::R5: return ARM::R4;
  case ARM::R7:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R6;
  case ARM::R9: return isReservedReg(MF, ARM::R9)  ? 0 :ARM::R8;
  case ARM::R11: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;

  case ARM::S1: return ARM::S0;
  case ARM::S3: return ARM::S2;
  case ARM::S5: return ARM::S4;
  case ARM::S7: return ARM::S6;
  case ARM::S9: return ARM::S8;
  case ARM::S11: return ARM::S10;
  case ARM::S13: return ARM::S12;
  case ARM::S15: return ARM::S14;
  case ARM::S17: return ARM::S16;
  case ARM::S19: return ARM::S18;
  case ARM::S21: return ARM::S20;
  case ARM::S23: return ARM::S22;
  case ARM::S25: return ARM::S24;
  case ARM::S27: return ARM::S26;
  case ARM::S29: return ARM::S28;
  case ARM::S31: return ARM::S30;

  case ARM::D1: return ARM::D0;
  case ARM::D3: return ARM::D2;
  case ARM::D5: return ARM::D4;
  case ARM::D7: return ARM::D6;
  case ARM::D9: return ARM::D8;
  case ARM::D11: return ARM::D10;
  case ARM::D13: return ARM::D12;
  case ARM::D15: return ARM::D14;
  case ARM::D17: return ARM::D16;
  case ARM::D19: return ARM::D18;
  case ARM::D21: return ARM::D20;
  case ARM::D23: return ARM::D22;
  case ARM::D25: return ARM::D24;
  case ARM::D27: return ARM::D26;
  case ARM::D29: return ARM::D28;
  case ARM::D31: return ARM::D30;
  }

  return 0;
}

unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
                                             const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R0: return ARM::R1;
  case ARM::R2: return ARM::R3;
  case ARM::R4: return ARM::R5;
  case ARM::R6:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R7;
  case ARM::R8: return isReservedReg(MF, ARM::R9)  ? 0 :ARM::R9;
  case ARM::R10: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;

  case ARM::S0: return ARM::S1;
  case ARM::S2: return ARM::S3;
  case ARM::S4: return ARM::S5;
  case ARM::S6: return ARM::S7;
  case ARM::S8: return ARM::S9;
  case ARM::S10: return ARM::S11;
  case ARM::S12: return ARM::S13;
  case ARM::S14: return ARM::S15;
  case ARM::S16: return ARM::S17;
  case ARM::S18: return ARM::S19;
  case ARM::S20: return ARM::S21;
  case ARM::S22: return ARM::S23;
  case ARM::S24: return ARM::S25;
  case ARM::S26: return ARM::S27;
  case ARM::S28: return ARM::S29;
  case ARM::S30: return ARM::S31;

  case ARM::D0: return ARM::D1;
  case ARM::D2: return ARM::D3;
  case ARM::D4: return ARM::D5;
  case ARM::D6: return ARM::D7;
  case ARM::D8: return ARM::D9;
  case ARM::D10: return ARM::D11;
  case ARM::D12: return ARM::D13;
  case ARM::D14: return ARM::D15;
  case ARM::D16: return ARM::D17;
  case ARM::D18: return ARM::D19;
  case ARM::D20: return ARM::D21;
  case ARM::D22: return ARM::D23;
  case ARM::D24: return ARM::D25;
  case ARM::D26: return ARM::D27;
  case ARM::D28: return ARM::D29;
  case ARM::D30: return ARM::D31;
  }

  return 0;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::
emitLoadConstPool(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator &MBBI,
                  DebugLoc dl,
                  unsigned DestReg, unsigned SubIdx, int Val,
                  ARMCC::CondCodes Pred,
                  unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
        ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
    .addReg(DestReg, getDefRegState(true), SubIdx)
    .addConstantPoolIndex(Idx)
    .addImm(0).addImm(Pred).addReg(PredReg)
    .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return EnableLocalStackAlloc;
}

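/// emitSPUpdate - Adjust SP by NumBytes (positive or negative), emitting the
/// appropriate ARM or Thumb2 add/sub-immediate sequence under the given
/// predicate.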
static void
emitSPUpdate(bool isARM,
             MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
             DebugLoc dl, const ARMBaseInstrInfo &TII,
             int NumBytes,
             ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
  if (isARM)
    emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                            Pred, PredReg, TII);
  else
    emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                           Pred, PredReg, TII);
}

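/// eliminateCallFramePseudoInstr - Replace the ADJCALLSTACKDOWN and
/// ADJCALLSTACKUP pseudo instructions.  When the call frame is not reserved
/// (e.g. in the presence of allocas), they are turned into explicit
/// stack-pointer adjustments; otherwise they are simply removed.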
void ARMBaseRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    // If we have alloca, convert as follows:
    // ADJCALLSTACKDOWN -> sub, sp, sp, amount
    // ADJCALLSTACKUP   -> add, sp, sp, amount
    MachineInstr *Old = I;
    DebugLoc dl = Old->getDebugLoc();
    unsigned Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly.  To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = TFI->getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;

      ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
      assert(!AFI->isThumb1OnlyFunction() &&
             "This eliminateCallFramePseudoInstr does not support Thumb1!");
      bool isARM = !AFI->isThumbFunction();

      // Replace the pseudo instruction with a new instruction...
      unsigned Opc = Old->getOpcode();
      int PIdx = Old->findFirstPredOperandIdx();
      ARMCC::CondCodes Pred = (PIdx == -1)
        ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
      if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
        // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
        unsigned PredReg = Old->getOperand(2).getReg();
        emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
      } else {
        // Note: PredReg is operand 3 for ADJCALLSTACKUP.
        unsigned PredReg = Old->getOperand(3).getReg();
        assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
        emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
      }
    }
  }
  MBB.erase(I);
}

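/// getFrameIndexInstrOffset - Return the byte offset already encoded in the
/// addressing-mode operands that follow the frame index operand Idx of MI,
/// taking the addressing mode's sign convention and scale into account.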
int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrMode3: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrModeT1_s: {
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  }
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    if (ForceAllBaseRegAlloc)
      return true;
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset = -Offset;
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDrSPi : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;                  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const MachineFunction &MF = *MBB->getParent();
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = AddDefaultPred(BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset));

  if (!AFI->isThumb1OnlyFunction())
    AddDefaultCC(MIB);
}

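/// resolveFrameIndex - Rewrite the frame index operand of the instruction at I
/// to use BaseReg plus Offset, using the ARM or Thumb2 rewriting helper as
/// appropriate.  Thumb1 is not supported here.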
void
ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
                                       unsigned BaseReg, int64_t Offset) const {
  MachineInstr &MI = *I;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  assert (Done && "Unable to resolve frame index!");
  (void)Done;
}

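/// isFrameOffsetLegal - Return true if Offset, added to whatever offset the
/// instruction already encodes for its frame index operand, can be encoded in
/// MI's addressing mode (respecting its bit width, scale, and sign).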
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on the sign of Offset, consider the appropriate instruction.
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = 5;
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

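/// eliminateFrameIndex - Replace the abstract frame index operand of MI with
/// an actual base register (FP, SP, or a base register) plus an immediate
/// offset.  If the offset cannot be folded into the instruction, the remainder
/// is materialized into a virtual scratch register with an add/sub-immediate
/// sequence and the instruction is rewritten to use that register.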
void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, RegScavenger *RS) const {
  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMFrameLowering *TFI =
    static_cast<const ARMFrameLowering*>(MF.getTarget().getFrameLowering());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated.  That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && FrameIndex == RS->getScavengingFrameIndex()){
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo()->hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(i).  ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(i+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above; handle the rest by providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(&ARM::GPRRegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
  }
}

1135