//===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMFrameLowering.h"
#include "ARMInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CommandLine.h"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

static cl::opt<bool>
ForceAllBaseRegAlloc("arm-force-base-reg-alloc", cl::Hidden, cl::init(false),
          cl::desc("Force use of virtual base registers for stack load/store"));
static cl::opt<bool>
EnableLocalStackAlloc("enable-local-stack-alloc", cl::init(true), cl::Hidden,
          cl::desc("Enable pre-regalloc stack frame index allocation"));
static cl::opt<bool>
EnableBasePointer("arm-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
                                         const ARMSubtarget &sti)
  : ARMGenRegisterInfo(ARM::LR), TII(tii), STI(sti),
    FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
    BasePtr(ARM::R6) {
}

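/// getCalleeSavedRegs - Return a null-terminated list of callee-saved
/// registers. Darwin uses a different list because the Darwin ABI does not
/// treat R9 as callee-saved.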
const unsigned*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  static const unsigned CalleeSavedRegs[] = {
    ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
    ARM::R7, ARM::R6,  ARM::R5,  ARM::R4,

    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
    0
  };

  static const unsigned DarwinCalleeSavedRegs[] = {
    // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved
    // register.
    ARM::LR,  ARM::R7,  ARM::R6, ARM::R5, ARM::R4,
    ARM::R11, ARM::R10, ARM::R8,

    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
    0
  };
  return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
}

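/// getReservedRegs - Mark the registers the allocator must not use: SP, PC,
/// FPSCR, the frame and base pointers when they are in use, R9 on targets
/// that reserve it, and D16-D31 when the subtarget does not have them.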
BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  Reserved.set(ARM::SP);
  Reserved.set(ARM::PC);
  Reserved.set(ARM::FPSCR);
  if (TFI->hasFP(MF))
    Reserved.set(FramePtr);
  if (hasBasePointer(MF))
    Reserved.set(BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    Reserved.set(ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasVFP3() || STI.hasD16()) {
    assert(ARM::D31 == ARM::D16 + 15);
    for (unsigned i = 0; i != 16; ++i)
      Reserved.set(ARM::D16 + i);
  }
  return Reserved;
}

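/// isReservedReg - Return true if the specified core register is reserved in
/// this function: SP, PC, the frame pointer when in use, the base pointer
/// (R6) when in use, or R9 on targets that reserve it.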
bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
                                        unsigned Reg) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  switch (Reg) {
  default: break;
  case ARM::SP:
  case ARM::PC:
    return true;
  case ARM::R6:
    if (hasBasePointer(MF))
      return true;
    break;
  case ARM::R7:
  case ARM::R11:
    if (FramePtr == Reg && TFI->hasFP(MF))
      return true;
    break;
  case ARM::R9:
    return STI.isR9Reserved();
  }

  return false;
}

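/// getMatchingSuperRegClass - Return a register class for A such that every
/// register in it has a SubIdx sub-register in class B, or null to indicate
/// that coalescing across this sub-register index is not allowed.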
const TargetRegisterClass *
ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                              const TargetRegisterClass *B,
                                              unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case ARM::ssub_0:
  case ARM::ssub_1:
  case ARM::ssub_2:
  case ARM::ssub_3: {
    // S sub-registers.
    if (A->getSize() == 8) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::DPR_8RegClass;
      assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
      if (A == &ARM::DPR_8RegClass)
        return A;
      return &ARM::DPR_VFP2RegClass;
    }

    if (A->getSize() == 16) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::QPR_8RegClass;
      return &ARM::QPR_VFP2RegClass;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::SPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return &ARM::QQPR_VFP2RegClass;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    return 0;  // Do not allow coalescing!
  }
  case ARM::dsub_0:
  case ARM::dsub_1:
  case ARM::dsub_2:
  case ARM::dsub_3: {
    // D sub-registers.
    if (A->getSize() == 16) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B != &ARM::DPRRegClass)
      return 0;  // Do not allow coalescing!
    return A;
  }
  case ARM::dsub_4:
  case ARM::dsub_5:
  case ARM::dsub_6:
  case ARM::dsub_7: {
    // D sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::DPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }

  case ARM::qsub_0:
  case ARM::qsub_1: {
    // Q sub-registers.
    if (A->getSize() == 32) {
      if (B == &ARM::QPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::QPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  case ARM::qsub_2:
  case ARM::qsub_3: {
    // Q sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  }
  return 0;
}

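/// canCombineSubRegIndices - Given a register class and a list of
/// sub-register indices, return true if the indices together form a single
/// contiguous register, setting NewSubIdx to that register's sub-register
/// index within RC (0 if they cover the whole register).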
bool
ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
                                          SmallVectorImpl<unsigned> &SubIndices,
                                          unsigned &NewSubIdx) const {

  unsigned Size = RC->getSize() * 8;
  if (Size < 6)
    return 0;

  NewSubIdx = 0;  // Whole register.
  unsigned NumRegs = SubIndices.size();
  if (NumRegs == 8) {
    // 8 D registers -> 1 QQQQ register.
    return (Size == 512 &&
            SubIndices[0] == ARM::dsub_0 &&
            SubIndices[1] == ARM::dsub_1 &&
            SubIndices[2] == ARM::dsub_2 &&
            SubIndices[3] == ARM::dsub_3 &&
            SubIndices[4] == ARM::dsub_4 &&
            SubIndices[5] == ARM::dsub_5 &&
            SubIndices[6] == ARM::dsub_6 &&
            SubIndices[7] == ARM::dsub_7);
  } else if (NumRegs == 4) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 4 Q registers -> 1 QQQQ register.
      return (Size == 512 &&
              SubIndices[1] == ARM::qsub_1 &&
              SubIndices[2] == ARM::qsub_2 &&
              SubIndices[3] == ARM::qsub_3);
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 4 D registers -> 1 QQ register.
      if (Size >= 256 &&
          SubIndices[1] == ARM::dsub_1 &&
          SubIndices[2] == ARM::dsub_2 &&
          SubIndices[3] == ARM::dsub_3) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 4 D registers -> 1 QQ register (2nd).
      if (Size == 512 &&
          SubIndices[1] == ARM::dsub_5 &&
          SubIndices[2] == ARM::dsub_6 &&
          SubIndices[3] == ARM::dsub_7) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 4 S registers -> 1 Q register.
      if (Size >= 128 &&
          SubIndices[1] == ARM::ssub_1 &&
          SubIndices[2] == ARM::ssub_2 &&
          SubIndices[3] == ARM::ssub_3) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    }
  } else if (NumRegs == 2) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 2 Q registers -> 1 QQ register.
      if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::qsub_2) {
      // 2 Q registers -> 1 QQ register (2nd).
      if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 2 D registers -> 1 Q register.
      if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_2) {
      // 2 D registers -> 1 Q register (2nd).
      if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
        NewSubIdx = ARM::qsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 2 D registers -> 1 Q register (3rd).
      if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
        NewSubIdx = ARM::qsub_2;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_6) {
      // 2 D registers -> 1 Q register (4th).
      if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
        NewSubIdx = ARM::qsub_3;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 2 S registers -> 1 D register.
      if (SubIndices[1] == ARM::ssub_1) {
        if (Size >= 128)
          NewSubIdx = ARM::dsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_2) {
      // 2 S registers -> 1 D register (2nd).
      if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
        NewSubIdx = ARM::dsub_1;
        return true;
      }
    }
  }
  return false;
}

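/// getLargestLegalSuperClass - Walk the super-class chain of RC and return
/// the first class that is one of the primary ARM register classes (GPR,
/// SPR, DPR, QPR, QQPR or QQQQPR), or RC itself if none is found.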
const TargetRegisterClass*
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC)
                                                                         const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
  return ARM::GPRRegisterClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return 0;  // Can't copy CCR registers.
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

/// getRawAllocationOrder - Returns the register allocation order for a
/// specified register class with a target-dependent hint.
ArrayRef<unsigned>
ARMBaseRegisterInfo::getRawAllocationOrder(const TargetRegisterClass *RC,
                                           unsigned HintType, unsigned HintReg,
                                           const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  // Alternative register allocation orders when favoring even / odd registers
  // of register pairs.

  // No FP, R9 is available.
  static const unsigned GPREven1[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd1[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R7, R9 is available.
  static const unsigned GPREven2[] = {
    ARM::R0, ARM::R2, ARM::R4,          ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd2[] = {
    ARM::R1, ARM::R3, ARM::R5,          ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R11, R9 is available.
  static const unsigned GPREven3[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9
  };
  static const unsigned GPROdd3[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
    ARM::R8
  };

  // No FP, R9 is not available.
  static const unsigned GPREven4[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6,          ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd4[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7,          ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R7, R9 is not available.
  static const unsigned GPREven5[] = {
    ARM::R0, ARM::R2, ARM::R4,                   ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd5[] = {
    ARM::R1, ARM::R3, ARM::R5,                   ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R11, R9 is not available.
  static const unsigned GPREven6[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
  };
  static const unsigned GPROdd6[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
  };

  // We only support even/odd hints for GPR and rGPR.
  if (RC != ARM::GPRRegisterClass && RC != ARM::rGPRRegisterClass)
    return RC->getRawAllocationOrder(MF);

  if (HintType == ARMRI::RegPairEven) {
    if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return RC->getRawAllocationOrder(MF);

    if (!TFI->hasFP(MF)) {
      if (!STI.isR9Reserved())
        return makeArrayRef(GPREven1);
      else
        return makeArrayRef(GPREven4);
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return makeArrayRef(GPREven2);
      else
        return makeArrayRef(GPREven5);
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return makeArrayRef(GPREven3);
      else
        return makeArrayRef(GPREven6);
    }
  } else if (HintType == ARMRI::RegPairOdd) {
    if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return RC->getRawAllocationOrder(MF);

    if (!TFI->hasFP(MF)) {
      if (!STI.isR9Reserved())
        return makeArrayRef(GPROdd1);
      else
        return makeArrayRef(GPROdd4);
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return makeArrayRef(GPROdd2);
      else
        return makeArrayRef(GPROdd5);
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return makeArrayRef(GPROdd3);
      else
        return makeArrayRef(GPROdd6);
    }
  }
  return RC->getRawAllocationOrder(MF);
}

/// ResolveRegAllocHint - Resolves the specified register allocation hint
/// to a physical register. Returns the physical register if it is successful.
unsigned
ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
                                         const MachineFunction &MF) const {
  if (Reg == 0 || !isPhysicalRegister(Reg))
    return 0;
  if (Type == 0)
    return Reg;
  else if (Type == (unsigned)ARMRI::RegPairOdd)
    // Odd register.
    return getRegisterPairOdd(Reg, MF);
  else if (Type == (unsigned)ARMRI::RegPairEven)
    // Even register.
    return getRegisterPairEven(Reg, MF);
  return 0;
}

void
ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      TargetRegisterInfo::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one of the even / odd register pair and it's now changed
    // (e.g. coalesced) into a different register, the allocation hint of the
    // other register of the pair must be updated to reflect the relationship
    // change.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    if (Hint.second == Reg)
      // Make sure the pair has not already divorced.
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
  }
}

bool
ARMBaseRegisterInfo::avoidWriteAfterWrite(const TargetRegisterClass *RC) const {
  // Cortex-A9 has a write-after-write hazard for NEON registers.
  if (!STI.isCortexA9())
    return false;

  switch (RC->getID()) {
  case ARM::DPRRegClassID:
  case ARM::DPR_8RegClassID:
  case ARM::DPR_VFP2RegClassID:
  case ARM::QPRRegClassID:
  case ARM::QPR_8RegClassID:
  case ARM::QPR_VFP2RegClassID:
  case ARM::SPRRegClassID:
  case ARM::SPR_8RegClassID:
    // Avoid reusing S, D, and Q registers.
    // Don't increase register pressure for QQ and QQQQ.
    return true;
  default:
    return false;
  }
}

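/// hasBasePointer - Return true if this function needs the dedicated base
/// pointer register (R6) to access locals, e.g. when the stack must be
/// realigned or a Thumb function has variable sized objects.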
bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (!EnableBasePointer)
    return false;

  if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 allows positive offsets only.
  // It's going to be better to use the SP or base pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a base pointer.
  if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If the estimate is wrong, the scavenger will still make the access
    // work; it just won't be optimal.
    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
      return false;
    return true;
  }

  return false;
}

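/// canRealignStack - Return true if dynamic stack realignment is possible
/// for this function.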
bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. There are VLAs in the function and the base pointer is disabled.
  return (RealignStack && (!MFI->hasVarSizedObjects() || EnableBasePointer));
}

bool ARMBaseRegisterInfo::
needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
  bool requiresRealignment = ((MFI->getLocalFrameMaxAlign() > StackAlign) ||
                               F->hasFnAttr(Attribute::StackAlignment));

  return requiresRealignment && canRealignStack(MF);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  if (DisableFramePointerElim(MF) && MFI->adjustsStack())
    return true;
  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
    || needsStackRealignment(MF);
}

unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (TFI->hasFP(MF))
    return FramePtr;
  return ARM::SP;
}

unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
                                              const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R1: return ARM::R0;
  case ARM::R3: return ARM::R2;
  case ARM::R5: return ARM::R4;
  case ARM::R7:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R6;
  case ARM::R9: return isReservedReg(MF, ARM::R9)  ? 0 : ARM::R8;
  case ARM::R11: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;

  case ARM::S1: return ARM::S0;
  case ARM::S3: return ARM::S2;
  case ARM::S5: return ARM::S4;
  case ARM::S7: return ARM::S6;
  case ARM::S9: return ARM::S8;
  case ARM::S11: return ARM::S10;
  case ARM::S13: return ARM::S12;
  case ARM::S15: return ARM::S14;
  case ARM::S17: return ARM::S16;
  case ARM::S19: return ARM::S18;
  case ARM::S21: return ARM::S20;
  case ARM::S23: return ARM::S22;
  case ARM::S25: return ARM::S24;
  case ARM::S27: return ARM::S26;
  case ARM::S29: return ARM::S28;
  case ARM::S31: return ARM::S30;

  case ARM::D1: return ARM::D0;
  case ARM::D3: return ARM::D2;
  case ARM::D5: return ARM::D4;
  case ARM::D7: return ARM::D6;
  case ARM::D9: return ARM::D8;
  case ARM::D11: return ARM::D10;
  case ARM::D13: return ARM::D12;
  case ARM::D15: return ARM::D14;
  case ARM::D17: return ARM::D16;
  case ARM::D19: return ARM::D18;
  case ARM::D21: return ARM::D20;
  case ARM::D23: return ARM::D22;
  case ARM::D25: return ARM::D24;
  case ARM::D27: return ARM::D26;
  case ARM::D29: return ARM::D28;
  case ARM::D31: return ARM::D30;
  }

  return 0;
}

unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
                                             const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R0: return ARM::R1;
  case ARM::R2: return ARM::R3;
  case ARM::R4: return ARM::R5;
  case ARM::R6:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R7;
  case ARM::R8: return isReservedReg(MF, ARM::R9)  ? 0 : ARM::R9;
  case ARM::R10: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;

  case ARM::S0: return ARM::S1;
  case ARM::S2: return ARM::S3;
  case ARM::S4: return ARM::S5;
  case ARM::S6: return ARM::S7;
  case ARM::S8: return ARM::S9;
  case ARM::S10: return ARM::S11;
  case ARM::S12: return ARM::S13;
  case ARM::S14: return ARM::S15;
  case ARM::S16: return ARM::S17;
  case ARM::S18: return ARM::S19;
  case ARM::S20: return ARM::S21;
  case ARM::S22: return ARM::S23;
  case ARM::S24: return ARM::S25;
  case ARM::S26: return ARM::S27;
  case ARM::S28: return ARM::S29;
  case ARM::S30: return ARM::S31;

  case ARM::D0: return ARM::D1;
  case ARM::D2: return ARM::D3;
  case ARM::D4: return ARM::D5;
  case ARM::D6: return ARM::D7;
  case ARM::D8: return ARM::D9;
  case ARM::D10: return ARM::D11;
  case ARM::D12: return ARM::D13;
  case ARM::D14: return ARM::D15;
  case ARM::D16: return ARM::D17;
  case ARM::D18: return ARM::D19;
  case ARM::D20: return ARM::D21;
  case ARM::D22: return ARM::D23;
  case ARM::D24: return ARM::D25;
  case ARM::D26: return ARM::D27;
  case ARM::D28: return ARM::D29;
  case ARM::D30: return ARM::D31;
  }

  return 0;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::
emitLoadConstPool(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator &MBBI,
                  DebugLoc dl,
                  unsigned DestReg, unsigned SubIdx, int Val,
                  ARMCC::CondCodes Pred,
                  unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
        ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
    .addReg(DestReg, getDefRegState(true), SubIdx)
    .addConstantPoolIndex(Idx)
    .addImm(0).addImm(Pred).addReg(PredReg)
    .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return EnableLocalStackAlloc;
}

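/// emitSPUpdate - Adjust SP by NumBytes bytes, using the ARM or Thumb2
/// register-plus-immediate expansion as appropriate.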
static void
emitSPUpdate(bool isARM,
             MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
             DebugLoc dl, const ARMBaseInstrInfo &TII,
             int NumBytes,
             ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
  if (isARM)
    emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                            Pred, PredReg, TII);
  else
    emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                           Pred, PredReg, TII);
}

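/// eliminateCallFramePseudoInstr - Lower ADJCALLSTACKDOWN / ADJCALLSTACKUP
/// pseudos. When a reserved call frame is not available, they become SP
/// adjustments; otherwise they are simply erased.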
void ARMBaseRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    // If we have alloca, convert as follows:
    // ADJCALLSTACKDOWN -> sub, sp, sp, amount
    // ADJCALLSTACKUP   -> add, sp, sp, amount
    MachineInstr *Old = I;
    DebugLoc dl = Old->getDebugLoc();
    unsigned Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly.  To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = TFI->getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;

      ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
      assert(!AFI->isThumb1OnlyFunction() &&
             "This eliminateCallFramePseudoInstr does not support Thumb1!");
      bool isARM = !AFI->isThumbFunction();

      // Replace the pseudo instruction with a new instruction...
      unsigned Opc = Old->getOpcode();
      int PIdx = Old->findFirstPredOperandIdx();
      ARMCC::CondCodes Pred = (PIdx == -1)
        ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
      if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
        // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
        unsigned PredReg = Old->getOperand(2).getReg();
        emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
      } else {
        // Note: PredReg is operand 3 for ADJCALLSTACKUP.
        unsigned PredReg = Old->getOperand(3).getReg();
        assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
        emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
      }
    }
  }
  MBB.erase(I);
}

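/// getFrameIndexInstrOffset - Return the immediate offset already encoded in
/// MI's frame index addressing operands, scaled to bytes.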
int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrMode3: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrModeT1_s: {
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  }
  default:
    llvm_unreachable("Unsupported addressing mode!");
    break;
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    if (ForceAllBaseRegAlloc)
      return true;
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset = -Offset;
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDrSPi : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;                  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));

  MachineInstrBuilder MIB = AddDefaultPred(BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset));

  if (!AFI->isThumb1OnlyFunction())
    AddDefaultCC(MIB);
}

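/// resolveFrameIndex - Rewrite the frame index operand of MI to use BaseReg
/// plus Offset. Not supported for Thumb1 functions.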
void
ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
                                       unsigned BaseReg, int64_t Offset) const {
  MachineInstr &MI = *I;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  assert (Done && "Unable to resolve frame index!");
  (void)Done;
}

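/// isFrameOffsetLegal - Return true if the given byte offset, combined with
/// the offset already present in MI, is encodable in MI's addressing mode.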
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = 5;
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
    break;
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

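/// eliminateFrameIndex - Replace the abstract frame index in MI with a real
/// register and offset, materializing the address in a scratch register when
/// the offset cannot be encoded directly.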
void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, RegScavenger *RS) const {
  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMFrameLowering *TFI =
    static_cast<const ARMFrameLowering*>(MF.getTarget().getFrameLowering());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(i).  ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(i+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above; handle the rest, providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
  }
}