ARMBaseRegisterInfo.cpp revision 16c29b5f285f375be53dabaa73e3e91107485fe4
//===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMFrameLowering.h"
#include "ARMInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

static cl::opt<bool>
ForceAllBaseRegAlloc("arm-force-base-reg-alloc", cl::Hidden, cl::init(false),
          cl::desc("Force use of virtual base registers for stack load/store"));
static cl::opt<bool>
EnableLocalStackAlloc("enable-local-stack-alloc", cl::init(true), cl::Hidden,
          cl::desc("Enable pre-regalloc stack frame index allocation"));
static cl::opt<bool>
EnableBasePointer("arm-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
                                         const ARMSubtarget &sti)
  : ARMGenRegisterInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    TII(tii), STI(sti),
    FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
    BasePtr(ARM::R6) {
}

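/// getCalleeSavedRegs - Return the null-terminated list of callee-saved
/// registers, selecting the Darwin-specific list when targeting Darwin.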
const unsigned*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  static const unsigned CalleeSavedRegs[] = {
    ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
    ARM::R7, ARM::R6,  ARM::R5,  ARM::R4,

    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
    0
  };

  static const unsigned DarwinCalleeSavedRegs[] = {
    // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved
    // register.
    ARM::LR,  ARM::R7,  ARM::R6, ARM::R5, ARM::R4,
    ARM::R11, ARM::R10, ARM::R8,

    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
    0
  };
  return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
}

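/// getReservedRegs - Return a bitvector marking SP, PC, FPSCR, the frame and
/// base pointers (when in use), and R9 (on subtargets that reserve it) as
/// unavailable to the register allocator.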
BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  Reserved.set(ARM::SP);
  Reserved.set(ARM::PC);
  Reserved.set(ARM::FPSCR);
  if (TFI->hasFP(MF))
    Reserved.set(FramePtr);
  if (hasBasePointer(MF))
    Reserved.set(BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    Reserved.set(ARM::R9);
  return Reserved;
}

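/// isReservedReg - Return true if the given physical register is reserved in
/// this function: SP, PC, the base pointer (R6) when one is needed, the frame
/// pointer when a frame pointer is used, or R9 on subtargets that reserve it.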
bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
                                        unsigned Reg) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  switch (Reg) {
  default: break;
  case ARM::SP:
  case ARM::PC:
    return true;
  case ARM::R6:
    if (hasBasePointer(MF))
      return true;
    break;
  case ARM::R7:
  case ARM::R11:
    if (FramePtr == Reg && TFI->hasFP(MF))
      return true;
    break;
  case ARM::R9:
    return STI.isR9Reserved();
  }

  return false;
}

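/// getMatchingSuperRegClass - Given a super-register class A and a class B
/// constraining the SubIdx sub-register, return the subclass of A whose SubIdx
/// sub-registers satisfy B, or null to disallow coalescing across SubIdx.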
const TargetRegisterClass *
ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                              const TargetRegisterClass *B,
                                              unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case ARM::ssub_0:
  case ARM::ssub_1:
  case ARM::ssub_2:
  case ARM::ssub_3: {
    // S sub-registers.
    if (A->getSize() == 8) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::DPR_8RegClass;
      assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
      if (A == &ARM::DPR_8RegClass)
        return A;
      return &ARM::DPR_VFP2RegClass;
    }

    if (A->getSize() == 16) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::QPR_8RegClass;
      return &ARM::QPR_VFP2RegClass;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::SPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return &ARM::QQPR_VFP2RegClass;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    return 0;  // Do not allow coalescing!
  }
  case ARM::dsub_0:
  case ARM::dsub_1:
  case ARM::dsub_2:
  case ARM::dsub_3: {
    // D sub-registers.
    if (A->getSize() == 16) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B != &ARM::DPRRegClass)
      return 0;  // Do not allow coalescing!
    return A;
  }
  case ARM::dsub_4:
  case ARM::dsub_5:
  case ARM::dsub_6:
  case ARM::dsub_7: {
    // D sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::DPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }

  case ARM::qsub_0:
  case ARM::qsub_1: {
    // Q sub-registers.
    if (A->getSize() == 32) {
      if (B == &ARM::QPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::QPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  case ARM::qsub_2:
  case ARM::qsub_3: {
    // Q sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  }
  return 0;
}

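/// canCombineSubRegIndices - Return true if the given sequence of sub-register
/// indices covers a single contiguous register within class RC. On success,
/// NewSubIdx is set to the sub-register index of the combined register within
/// RC, or 0 when the indices cover the whole register.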
bool
ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
                                          SmallVectorImpl<unsigned> &SubIndices,
                                          unsigned &NewSubIdx) const {

  unsigned Size = RC->getSize() * 8;
  if (Size < 6)
    return 0;

  NewSubIdx = 0;  // Whole register.
  unsigned NumRegs = SubIndices.size();
  if (NumRegs == 8) {
    // 8 D registers -> 1 QQQQ register.
    return (Size == 512 &&
            SubIndices[0] == ARM::dsub_0 &&
            SubIndices[1] == ARM::dsub_1 &&
            SubIndices[2] == ARM::dsub_2 &&
            SubIndices[3] == ARM::dsub_3 &&
            SubIndices[4] == ARM::dsub_4 &&
            SubIndices[5] == ARM::dsub_5 &&
            SubIndices[6] == ARM::dsub_6 &&
            SubIndices[7] == ARM::dsub_7);
  } else if (NumRegs == 4) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 4 Q registers -> 1 QQQQ register.
      return (Size == 512 &&
              SubIndices[1] == ARM::qsub_1 &&
              SubIndices[2] == ARM::qsub_2 &&
              SubIndices[3] == ARM::qsub_3);
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 4 D registers -> 1 QQ register.
      if (Size >= 256 &&
          SubIndices[1] == ARM::dsub_1 &&
          SubIndices[2] == ARM::dsub_2 &&
          SubIndices[3] == ARM::dsub_3) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 4 D registers -> 1 QQ register (2nd).
      if (Size == 512 &&
          SubIndices[1] == ARM::dsub_5 &&
          SubIndices[2] == ARM::dsub_6 &&
          SubIndices[3] == ARM::dsub_7) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 4 S registers -> 1 Q register.
      if (Size >= 128 &&
          SubIndices[1] == ARM::ssub_1 &&
          SubIndices[2] == ARM::ssub_2 &&
          SubIndices[3] == ARM::ssub_3) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    }
  } else if (NumRegs == 2) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 2 Q registers -> 1 QQ register.
      if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::qsub_2) {
      // 2 Q registers -> 1 QQ register (2nd).
      if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 2 D registers -> 1 Q register.
      if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_2) {
      // 2 D registers -> 1 Q register (2nd).
      if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
        NewSubIdx = ARM::qsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 2 D registers -> 1 Q register (3rd).
      if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
        NewSubIdx = ARM::qsub_2;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_6) {
      // 2 D registers -> 1 Q register (4th).
      if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
        NewSubIdx = ARM::qsub_3;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 2 S registers -> 1 D register.
      if (SubIndices[1] == ARM::ssub_1) {
        if (Size >= 128)
          NewSubIdx = ARM::dsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_2) {
      // 2 S registers -> 1 D register (2nd).
      if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
        NewSubIdx = ARM::dsub_1;
        return true;
      }
    }
  }
  return false;
}


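/// getPointerRegClass - Pointer values are held in general-purpose registers.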
const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
  return ARM::GPRRegisterClass;
}

/// getAllocationOrder - Returns the register allocation order for a specified
/// register class in the form of a pair of TargetRegisterClass iterators.
std::pair<TargetRegisterClass::iterator,TargetRegisterClass::iterator>
ARMBaseRegisterInfo::getAllocationOrder(const TargetRegisterClass *RC,
                                        unsigned HintType, unsigned HintReg,
                                        const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  // Alternative register allocation orders when favoring even / odd registers
  // of register pairs.

  // No FP, R9 is available.
  static const unsigned GPREven1[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd1[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R7, R9 is available.
  static const unsigned GPREven2[] = {
    ARM::R0, ARM::R2, ARM::R4,          ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd2[] = {
    ARM::R1, ARM::R3, ARM::R5,          ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R11, R9 is available.
  static const unsigned GPREven3[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9
  };
  static const unsigned GPROdd3[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
    ARM::R8
  };

  // No FP, R9 is not available.
  static const unsigned GPREven4[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6,          ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd4[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7,          ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R7, R9 is not available.
  static const unsigned GPREven5[] = {
    ARM::R0, ARM::R2, ARM::R4,                   ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd5[] = {
    ARM::R1, ARM::R3, ARM::R5,                   ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R11, R9 is not available.
  static const unsigned GPREven6[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
  };
  static const unsigned GPROdd6[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
  };


  if (HintType == ARMRI::RegPairEven) {
    if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return std::make_pair(RC->allocation_order_begin(MF),
                            RC->allocation_order_end(MF));

    if (!TFI->hasFP(MF)) {
      if (!STI.isR9Reserved())
        return std::make_pair(GPREven1,
                              GPREven1 + (sizeof(GPREven1)/sizeof(unsigned)));
      else
        return std::make_pair(GPREven4,
                              GPREven4 + (sizeof(GPREven4)/sizeof(unsigned)));
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return std::make_pair(GPREven2,
                              GPREven2 + (sizeof(GPREven2)/sizeof(unsigned)));
      else
        return std::make_pair(GPREven5,
                              GPREven5 + (sizeof(GPREven5)/sizeof(unsigned)));
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return std::make_pair(GPREven3,
                              GPREven3 + (sizeof(GPREven3)/sizeof(unsigned)));
      else
        return std::make_pair(GPREven6,
                              GPREven6 + (sizeof(GPREven6)/sizeof(unsigned)));
    }
  } else if (HintType == ARMRI::RegPairOdd) {
    if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return std::make_pair(RC->allocation_order_begin(MF),
                            RC->allocation_order_end(MF));

    if (!TFI->hasFP(MF)) {
      if (!STI.isR9Reserved())
        return std::make_pair(GPROdd1,
                              GPROdd1 + (sizeof(GPROdd1)/sizeof(unsigned)));
      else
        return std::make_pair(GPROdd4,
                              GPROdd4 + (sizeof(GPROdd4)/sizeof(unsigned)));
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return std::make_pair(GPROdd2,
                              GPROdd2 + (sizeof(GPROdd2)/sizeof(unsigned)));
      else
        return std::make_pair(GPROdd5,
                              GPROdd5 + (sizeof(GPROdd5)/sizeof(unsigned)));
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return std::make_pair(GPROdd3,
                              GPROdd3 + (sizeof(GPROdd3)/sizeof(unsigned)));
      else
        return std::make_pair(GPROdd6,
                              GPROdd6 + (sizeof(GPROdd6)/sizeof(unsigned)));
    }
  }
  return std::make_pair(RC->allocation_order_begin(MF),
                        RC->allocation_order_end(MF));
}

/// ResolveRegAllocHint - Resolves the specified register allocation hint
/// to a physical register. Returns the physical register on success, or 0
/// if the hint cannot be resolved.
unsigned
ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
                                         const MachineFunction &MF) const {
  if (Reg == 0 || !isPhysicalRegister(Reg))
    return 0;
  if (Type == 0)
    return Reg;
  else if (Type == (unsigned)ARMRI::RegPairOdd)
    // Odd register.
    return getRegisterPairOdd(Reg, MF);
  else if (Type == (unsigned)ARMRI::RegPairEven)
    // Even register.
    return getRegisterPairEven(Reg, MF);
  return 0;
}

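/// UpdateRegAllocHint - When Reg, which carries an even/odd pairing hint, is
/// rewritten (e.g. coalesced) into NewReg, update the partner register's hint
/// so the pairing still refers to a live register.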
void
ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      TargetRegisterInfo::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one of the registers of an even / odd pair and it has now
    // been changed (e.g. coalesced) into a different register, the allocation
    // hint of the other register of the pair must be updated to reflect the
    // change.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    if (Hint.second == Reg)
      // Make sure the pair has not already divorced.
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
  }
}

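/// hasBasePointer - Return true if this function needs a dedicated base
/// pointer register (R6): when the stack must be realigned in the presence of
/// variable-sized objects, or when a Thumb function with variable-sized
/// objects cannot rely on SP- or FP-relative offsets alone.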
bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (!EnableBasePointer)
    return false;

  if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 allows positive offsets only.
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach the locals. If a function has a
    // smallish frame, it's less likely to have lots of spills and callee
    // saved space, so it's all more likely to be within range of the frame
    // pointer. If the estimate is wrong, the scavenger will still make the
    // access work; it just won't be optimal.
    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
      return false;
    return true;
  }

  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
  // 3. There are VLAs in the function and the base pointer is disabled.
  return (RealignStack && !AFI->isThumb1OnlyFunction() &&
          (!MFI->hasVarSizedObjects() || EnableBasePointer));
}

bool ARMBaseRegisterInfo::
needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
  bool requiresRealignment = ((MFI->getLocalFrameMaxAlign() > StackAlign) ||
                               F->hasFnAttr(Attribute::StackAlignment));

  return requiresRealignment && canRealignStack(MF);
}

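/// cannotEliminateFrame - Return true if the stack frame cannot be
/// eliminated: frame pointer elimination is disabled while the function
/// adjusts the stack, or the function has variable-sized objects, has its
/// frame address taken, or requires stack realignment.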
bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  if (DisableFramePointerElim(MF) && MFI->adjustsStack())
    return true;
  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
    || needsStackRealignment(MF);
}

unsigned ARMBaseRegisterInfo::getRARegister() const {
  return ARM::LR;
}

unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (TFI->hasFP(MF))
    return FramePtr;
  return ARM::SP;
}

unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

int ARMBaseRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
  return ARMGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
}

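/// getRegisterPairEven - Return the even register of the even/odd pair that
/// Reg belongs to, or 0 if the pair is unavailable because one of its
/// registers is special or reserved in this function.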
unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
                                              const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R1:
    return ARM::R0;
  case ARM::R3:
    return ARM::R2;
  case ARM::R5:
    return ARM::R4;
  case ARM::R7:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R6;
  case ARM::R9:
    return isReservedReg(MF, ARM::R9)  ? 0 : ARM::R8;
  case ARM::R11:
    return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;

  case ARM::S1:
    return ARM::S0;
  case ARM::S3:
    return ARM::S2;
  case ARM::S5:
    return ARM::S4;
  case ARM::S7:
    return ARM::S6;
  case ARM::S9:
    return ARM::S8;
  case ARM::S11:
    return ARM::S10;
  case ARM::S13:
    return ARM::S12;
  case ARM::S15:
    return ARM::S14;
  case ARM::S17:
    return ARM::S16;
  case ARM::S19:
    return ARM::S18;
  case ARM::S21:
    return ARM::S20;
  case ARM::S23:
    return ARM::S22;
  case ARM::S25:
    return ARM::S24;
  case ARM::S27:
    return ARM::S26;
  case ARM::S29:
    return ARM::S28;
  case ARM::S31:
    return ARM::S30;

  case ARM::D1:
    return ARM::D0;
  case ARM::D3:
    return ARM::D2;
  case ARM::D5:
    return ARM::D4;
  case ARM::D7:
    return ARM::D6;
  case ARM::D9:
    return ARM::D8;
  case ARM::D11:
    return ARM::D10;
  case ARM::D13:
    return ARM::D12;
  case ARM::D15:
    return ARM::D14;
  case ARM::D17:
    return ARM::D16;
  case ARM::D19:
    return ARM::D18;
  case ARM::D21:
    return ARM::D20;
  case ARM::D23:
    return ARM::D22;
  case ARM::D25:
    return ARM::D24;
  case ARM::D27:
    return ARM::D26;
  case ARM::D29:
    return ARM::D28;
  case ARM::D31:
    return ARM::D30;
  }

  return 0;
}

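/// getRegisterPairOdd - Return the odd register of the even/odd pair that
/// Reg belongs to, or 0 if the pair is unavailable because one of its
/// registers is special or reserved in this function.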
unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
                                             const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R0:
    return ARM::R1;
  case ARM::R2:
    return ARM::R3;
  case ARM::R4:
    return ARM::R5;
  case ARM::R6:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R7;
  case ARM::R8:
    return isReservedReg(MF, ARM::R9)  ? 0 : ARM::R9;
  case ARM::R10:
    return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;

  case ARM::S0:
    return ARM::S1;
  case ARM::S2:
    return ARM::S3;
  case ARM::S4:
    return ARM::S5;
  case ARM::S6:
    return ARM::S7;
  case ARM::S8:
    return ARM::S9;
  case ARM::S10:
    return ARM::S11;
  case ARM::S12:
    return ARM::S13;
  case ARM::S14:
    return ARM::S15;
  case ARM::S16:
    return ARM::S17;
  case ARM::S18:
    return ARM::S19;
  case ARM::S20:
    return ARM::S21;
  case ARM::S22:
    return ARM::S23;
  case ARM::S24:
    return ARM::S25;
  case ARM::S26:
    return ARM::S27;
  case ARM::S28:
    return ARM::S29;
  case ARM::S30:
    return ARM::S31;

  case ARM::D0:
    return ARM::D1;
  case ARM::D2:
    return ARM::D3;
  case ARM::D4:
    return ARM::D5;
  case ARM::D6:
    return ARM::D7;
  case ARM::D8:
    return ARM::D9;
  case ARM::D10:
    return ARM::D11;
  case ARM::D12:
    return ARM::D13;
  case ARM::D14:
    return ARM::D15;
  case ARM::D16:
    return ARM::D17;
  case ARM::D18:
    return ARM::D19;
  case ARM::D20:
    return ARM::D21;
  case ARM::D22:
    return ARM::D23;
  case ARM::D24:
    return ARM::D25;
  case ARM::D26:
    return ARM::D27;
  case ARM::D28:
    return ARM::D29;
  case ARM::D30:
    return ARM::D31;
  }

  return 0;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::
emitLoadConstPool(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator &MBBI,
                  DebugLoc dl,
                  unsigned DestReg, unsigned SubIdx, int Val,
                  ARMCC::CondCodes Pred,
                  unsigned PredReg) const {
  MachineFunction &MF = *MBB.getParent();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
        ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
    .addReg(DestReg, getDefRegState(true), SubIdx)
    .addConstantPoolIndex(Idx)
    .addImm(0).addImm(Pred).addReg(PredReg);
}

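/// requiresRegisterScavenging - Always true for ARM; frame index elimination
/// may need a spare register to materialize large offsets.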
bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return EnableLocalStackAlloc;
}

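/// emitSPUpdate - Add NumBytes to SP, using an ARM or Thumb2 instruction
/// sequence depending on the isARM flag.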
static void
emitSPUpdate(bool isARM,
             MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
             DebugLoc dl, const ARMBaseInstrInfo &TII,
             int NumBytes,
             ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
  if (isARM)
    emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                            Pred, PredReg, TII);
  else
    emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                           Pred, PredReg, TII);
}


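/// eliminateCallFramePseudoInstr - Lower ADJCALLSTACKDOWN/ADJCALLSTACKUP
/// pseudo instructions. When the call frame is not reserved at function
/// entry, they become explicit SP adjustments; otherwise they are simply
/// removed.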
void ARMBaseRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    // If we have alloca, convert as follows:
    // ADJCALLSTACKDOWN -> sub, sp, sp, amount
    // ADJCALLSTACKUP   -> add, sp, sp, amount
    MachineInstr *Old = I;
    DebugLoc dl = Old->getDebugLoc();
    unsigned Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly.  To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = TFI->getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;

      ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
      assert(!AFI->isThumb1OnlyFunction() &&
             "This eliminateCallFramePseudoInstr does not support Thumb1!");
      bool isARM = !AFI->isThumbFunction();

      // Replace the pseudo instruction with a new instruction...
      unsigned Opc = Old->getOpcode();
      int PIdx = Old->findFirstPredOperandIdx();
      ARMCC::CondCodes Pred = (PIdx == -1)
        ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
      if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
        // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
        unsigned PredReg = Old->getOperand(2).getReg();
        emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
      } else {
        // Note: PredReg is operand 3 for ADJCALLSTACKUP.
        unsigned PredReg = Old->getOperand(3).getReg();
        assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
        emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
      }
    }
  }
  MBB.erase(I);
}

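/// getFrameIndexInstrOffset - Return the byte offset already encoded in MI's
/// addressing-mode operands for the frame index operand at index Idx, scaled
/// to bytes and negated for subtracting address modes.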
int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const TargetInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrMode3: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrModeT1_s: {
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  }
  default:
    llvm_unreachable("Unsupported addressing mode!");
    break;
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate, based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet), whether this offset is
  // likely to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    if (ForceAllBaseRegAlloc)
      return true;
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset = -Offset;
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDrSPi : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;                  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  MachineInstrBuilder MIB =
    BuildMI(*MBB, Ins, DL, TII.get(ADDriOpc), BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    AddDefaultCC(AddDefaultPred(MIB));
}

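/// resolveFrameIndex - Rewrite the frame index operand of the instruction at
/// I to use BaseReg plus Offset. Thumb1 functions are not supported here.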
void
ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
                                       unsigned BaseReg, int64_t Offset) const {
  MachineInstr &MI = *I;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  assert(Done && "Unable to resolve frame index!");
}

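/// isFrameOffsetLegal - Return true if Offset, combined with the offset
/// already encoded in MI, can be represented in MI's addressing mode.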
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             int64_t Offset) const {
  const TargetInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative offsets and i12 only positive ones, so
    // pick the appropriate instruction based on the sign of Offset.
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = 5;
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
    break;
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

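/// eliminateFrameIndex - Replace the frame index in MI with a reference to
/// the resolved frame register plus offset, folding as much of the offset as
/// the instruction allows and materializing the remainder into a scratch
/// register when necessary.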
void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, RegScavenger *RS) const {
  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMFrameLowering *TFI =
    static_cast<const ARMFrameLowering*>(MF.getTarget().getFrameLowering());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(i).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(i+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
    if (MI.getOpcode() == ARM::t2ADDrSPi)
      MI.setDesc(TII.get(ARM::t2ADDri));
    else if (MI.getOpcode() == ARM::t2SUBrSPi)
      MI.setDesc(TII.get(ARM::t2SUBri));
  }
}

#include "ARMGenRegisterInfo.inc"
