ARMBaseRegisterInfo.cpp revision f1c3eb37ae96572e1df34bf980b9ecd149b5ee33
1//===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the base ARM implementation of TargetRegisterInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "ARM.h"
15#include "ARMAddressingModes.h"
16#include "ARMBaseInstrInfo.h"
17#include "ARMBaseRegisterInfo.h"
18#include "ARMInstrInfo.h"
19#include "ARMMachineFunctionInfo.h"
20#include "ARMSubtarget.h"
21#include "llvm/Constants.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/Function.h"
24#include "llvm/LLVMContext.h"
25#include "llvm/CodeGen/MachineConstantPool.h"
26#include "llvm/CodeGen/MachineFrameInfo.h"
27#include "llvm/CodeGen/MachineFunction.h"
28#include "llvm/CodeGen/MachineInstrBuilder.h"
29#include "llvm/CodeGen/MachineLocation.h"
30#include "llvm/CodeGen/MachineRegisterInfo.h"
31#include "llvm/CodeGen/RegisterScavenging.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Support/raw_ostream.h"
35#include "llvm/Target/TargetFrameInfo.h"
36#include "llvm/Target/TargetMachine.h"
37#include "llvm/Target/TargetOptions.h"
38#include "llvm/ADT/BitVector.h"
39#include "llvm/ADT/SmallVector.h"
40#include "llvm/Support/CommandLine.h"
41
// Command-line knobs controlling the stack-frame / base-register heuristics
// implemented below.
// NOTE(review): these two opts are declared inside 'namespace llvm' yet are
// 'static' (internal linkage), so the namespace has no linkage effect --
// presumably historical; confirm before relying on it.
namespace llvm {
static cl::opt<bool>
ForceAllBaseRegAlloc("arm-force-base-reg-alloc", cl::Hidden, cl::init(false),
          cl::desc("Force use of virtual base registers for stack load/store"));
static cl::opt<bool>
EnableLocalStackAlloc("enable-local-stack-alloc", cl::init(true), cl::Hidden,
          cl::desc("Enable pre-regalloc stack frame index allocation"));
}

using namespace llvm;

// When disabled, hasBasePointer() always answers false (see below).
static cl::opt<bool>
EnableBasePointer("arm-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));
56
/// getRegisterNumbering - Map an ARM physical-register enum value to its
/// hardware encoding.  R, S, D and Q registers with the same index share a
/// number; SP, LR and PC encode as 13, 14 and 15 alongside S13-S15/D13-D15/
/// Q13-Q15.  S16-S31 / D16-D31 extend the numbering to 31.
unsigned ARMBaseRegisterInfo::getRegisterNumbering(unsigned Reg) {
  using namespace ARM;
  switch (Reg) {
  default:
    llvm_unreachable("Unknown ARM register!");
  case R0:  case S0:  case D0:  case Q0:  return 0;
  case R1:  case S1:  case D1:  case Q1:  return 1;
  case R2:  case S2:  case D2:  case Q2:  return 2;
  case R3:  case S3:  case D3:  case Q3:  return 3;
  case R4:  case S4:  case D4:  case Q4:  return 4;
  case R5:  case S5:  case D5:  case Q5:  return 5;
  case R6:  case S6:  case D6:  case Q6:  return 6;
  case R7:  case S7:  case D7:  case Q7:  return 7;
  case R8:  case S8:  case D8:  case Q8:  return 8;
  case R9:  case S9:  case D9:  case Q9:  return 9;
  case R10: case S10: case D10: case Q10: return 10;
  case R11: case S11: case D11: case Q11: return 11;
  case R12: case S12: case D12: case Q12: return 12;
  case SP:  case S13: case D13: case Q13: return 13;
  case LR:  case S14: case D14: case Q14: return 14;
  case PC:  case S15: case D15: case Q15: return 15;

  // Extension registers without a GPR alias.
  case S16: case D16: return 16;
  case S17: case D17: return 17;
  case S18: case D18: return 18;
  case S19: case D19: return 19;
  case S20: case D20: return 20;
  case S21: case D21: return 21;
  case S22: case D22: return 22;
  case S23: case D23: return 23;
  case S24: case D24: return 24;
  case S25: case D25: return 25;
  case S26: case D26: return 26;
  case S27: case D27: return 27;
  case S28: case D28: return 28;
  case S29: case D29: return 29;
  case S30: case D30: return 30;
  case S31: case D31: return 31;
  }
}
97
// Construct the register-info object for a particular subtarget.  Darwin and
// Thumb targets use R7 as the frame pointer; other ARM targets use R11.  R6
// serves as the base pointer when one is required (see hasBasePointer).
ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
                                         const ARMSubtarget &sti)
  : ARMGenRegisterInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    TII(tii), STI(sti),
    FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
    BasePtr(ARM::R6) {
}
105
106const unsigned*
107ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
108  static const unsigned CalleeSavedRegs[] = {
109    ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
110    ARM::R7, ARM::R6,  ARM::R5,  ARM::R4,
111
112    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
113    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
114    0
115  };
116
117  static const unsigned DarwinCalleeSavedRegs[] = {
118    // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved
119    // register.
120    ARM::LR,  ARM::R7,  ARM::R6, ARM::R5, ARM::R4,
121    ARM::R11, ARM::R10, ARM::R8,
122
123    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
124    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
125    0
126  };
127  return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
128}
129
130BitVector ARMBaseRegisterInfo::
131getReservedRegs(const MachineFunction &MF) const {
132  // FIXME: avoid re-calculating this everytime.
133  BitVector Reserved(getNumRegs());
134  Reserved.set(ARM::SP);
135  Reserved.set(ARM::PC);
136  Reserved.set(ARM::FPSCR);
137  if (hasFP(MF))
138    Reserved.set(FramePtr);
139  if (hasBasePointer(MF))
140    Reserved.set(BasePtr);
141  // Some targets reserve R9.
142  if (STI.isR9Reserved())
143    Reserved.set(ARM::R9);
144  return Reserved;
145}
146
147bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
148                                        unsigned Reg) const {
149  switch (Reg) {
150  default: break;
151  case ARM::SP:
152  case ARM::PC:
153    return true;
154  case ARM::R6:
155    if (hasBasePointer(MF))
156      return true;
157    break;
158  case ARM::R7:
159  case ARM::R11:
160    if (FramePtr == Reg && hasFP(MF))
161      return true;
162    break;
163  case ARM::R9:
164    return STI.isR9Reserved();
165  }
166
167  return false;
168}
169
/// getMatchingSuperRegClass - Return the register class to use for a value of
/// class A when its SubIdx sub-register must belong to class B, or null when
/// coalescing across SubIdx must be rejected.  A's byte size identifies its
/// tier: 8 = D (64-bit), 16 = Q, 32 = QQ, 64 = QQQQ.
const TargetRegisterClass *
ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                              const TargetRegisterClass *B,
                                              unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case ARM::ssub_0:
  case ARM::ssub_1:
  case ARM::ssub_2:
  case ARM::ssub_3: {
    // S sub-registers.
    if (A->getSize() == 8) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::DPR_8RegClass;
      assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
      if (A == &ARM::DPR_8RegClass)
        return A;
      // Constrain to D registers that actually have S sub-registers.
      return &ARM::DPR_VFP2RegClass;
    }

    if (A->getSize() == 16) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::QPR_8RegClass;
      return &ARM::QPR_VFP2RegClass;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::SPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return &ARM::QQPR_VFP2RegClass;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    return 0;  // Do not allow coalescing!
  }
  case ARM::dsub_0:
  case ARM::dsub_1:
  case ARM::dsub_2:
  case ARM::dsub_3: {
    // D sub-registers.
    if (A->getSize() == 16) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B != &ARM::DPRRegClass)
      return 0;  // Do not allow coalescing!
    return A;
  }
  case ARM::dsub_4:
  case ARM::dsub_5:
  case ARM::dsub_6:
  case ARM::dsub_7: {
    // D sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::DPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }

  case ARM::qsub_0:
  case ARM::qsub_1: {
    // Q sub-registers.
    if (A->getSize() == 32) {
      if (B == &ARM::QPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::QPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  case ARM::qsub_2:
  case ARM::qsub_3: {
    // Q sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  }
  return 0;
}
267
268bool
269ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
270                                          SmallVectorImpl<unsigned> &SubIndices,
271                                          unsigned &NewSubIdx) const {
272
273  unsigned Size = RC->getSize() * 8;
274  if (Size < 6)
275    return 0;
276
277  NewSubIdx = 0;  // Whole register.
278  unsigned NumRegs = SubIndices.size();
279  if (NumRegs == 8) {
280    // 8 D registers -> 1 QQQQ register.
281    return (Size == 512 &&
282            SubIndices[0] == ARM::dsub_0 &&
283            SubIndices[1] == ARM::dsub_1 &&
284            SubIndices[2] == ARM::dsub_2 &&
285            SubIndices[3] == ARM::dsub_3 &&
286            SubIndices[4] == ARM::dsub_4 &&
287            SubIndices[5] == ARM::dsub_5 &&
288            SubIndices[6] == ARM::dsub_6 &&
289            SubIndices[7] == ARM::dsub_7);
290  } else if (NumRegs == 4) {
291    if (SubIndices[0] == ARM::qsub_0) {
292      // 4 Q registers -> 1 QQQQ register.
293      return (Size == 512 &&
294              SubIndices[1] == ARM::qsub_1 &&
295              SubIndices[2] == ARM::qsub_2 &&
296              SubIndices[3] == ARM::qsub_3);
297    } else if (SubIndices[0] == ARM::dsub_0) {
298      // 4 D registers -> 1 QQ register.
299      if (Size >= 256 &&
300          SubIndices[1] == ARM::dsub_1 &&
301          SubIndices[2] == ARM::dsub_2 &&
302          SubIndices[3] == ARM::dsub_3) {
303        if (Size == 512)
304          NewSubIdx = ARM::qqsub_0;
305        return true;
306      }
307    } else if (SubIndices[0] == ARM::dsub_4) {
308      // 4 D registers -> 1 QQ register (2nd).
309      if (Size == 512 &&
310          SubIndices[1] == ARM::dsub_5 &&
311          SubIndices[2] == ARM::dsub_6 &&
312          SubIndices[3] == ARM::dsub_7) {
313        NewSubIdx = ARM::qqsub_1;
314        return true;
315      }
316    } else if (SubIndices[0] == ARM::ssub_0) {
317      // 4 S registers -> 1 Q register.
318      if (Size >= 128 &&
319          SubIndices[1] == ARM::ssub_1 &&
320          SubIndices[2] == ARM::ssub_2 &&
321          SubIndices[3] == ARM::ssub_3) {
322        if (Size >= 256)
323          NewSubIdx = ARM::qsub_0;
324        return true;
325      }
326    }
327  } else if (NumRegs == 2) {
328    if (SubIndices[0] == ARM::qsub_0) {
329      // 2 Q registers -> 1 QQ register.
330      if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
331        if (Size == 512)
332          NewSubIdx = ARM::qqsub_0;
333        return true;
334      }
335    } else if (SubIndices[0] == ARM::qsub_2) {
336      // 2 Q registers -> 1 QQ register (2nd).
337      if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
338        NewSubIdx = ARM::qqsub_1;
339        return true;
340      }
341    } else if (SubIndices[0] == ARM::dsub_0) {
342      // 2 D registers -> 1 Q register.
343      if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
344        if (Size >= 256)
345          NewSubIdx = ARM::qsub_0;
346        return true;
347      }
348    } else if (SubIndices[0] == ARM::dsub_2) {
349      // 2 D registers -> 1 Q register (2nd).
350      if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
351        NewSubIdx = ARM::qsub_1;
352        return true;
353      }
354    } else if (SubIndices[0] == ARM::dsub_4) {
355      // 2 D registers -> 1 Q register (3rd).
356      if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
357        NewSubIdx = ARM::qsub_2;
358        return true;
359      }
360    } else if (SubIndices[0] == ARM::dsub_6) {
361      // 2 D registers -> 1 Q register (3rd).
362      if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
363        NewSubIdx = ARM::qsub_3;
364        return true;
365      }
366    } else if (SubIndices[0] == ARM::ssub_0) {
367      // 2 S registers -> 1 D register.
368      if (SubIndices[1] == ARM::ssub_1) {
369        if (Size >= 128)
370          NewSubIdx = ARM::dsub_0;
371        return true;
372      }
373    } else if (SubIndices[0] == ARM::ssub_2) {
374      // 2 S registers -> 1 D register (2nd).
375      if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
376        NewSubIdx = ARM::dsub_1;
377        return true;
378      }
379    }
380  }
381  return false;
382}
383
384
/// getPointerRegClass - Pointers are always held in general-purpose
/// registers on ARM, regardless of Kind.
const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
  return ARM::GPRRegisterClass;
}
389
/// getAllocationOrder - Returns the register allocation order for a specified
/// register class in the form of a pair of TargetRegisterClass iterators.
std::pair<TargetRegisterClass::iterator,TargetRegisterClass::iterator>
ARMBaseRegisterInfo::getAllocationOrder(const TargetRegisterClass *RC,
                                        unsigned HintType, unsigned HintReg,
                                        const MachineFunction &MF) const {
  // Alternative register allocation orders when favoring even / odd registers
  // of register pairs.
  //
  // Six even/odd table pairs cover the cross product of {no FP, FP == R7,
  // FP == R11} and {R9 available, R9 reserved}.  Each table lists registers
  // of the preferred parity first, then the remaining allocatable GPRs.

  // No FP, R9 is available.
  static const unsigned GPREven1[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd1[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R7, R9 is available.
  static const unsigned GPREven2[] = {
    ARM::R0, ARM::R2, ARM::R4,          ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd2[] = {
    ARM::R1, ARM::R3, ARM::R5,          ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R11, R9 is available.
  static const unsigned GPREven3[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9
  };
  static const unsigned GPROdd3[] = {
    // NOTE(review): R6 is an even register, yet it appears in the preferred
    // odd slots here while R7 is demoted below -- looks like a typo for R7;
    // confirm against the pairing used by the other tables.
    ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
    ARM::R8
  };

  // No FP, R9 is not available.
  static const unsigned GPREven4[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6,          ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd4[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7,          ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R7, R9 is not available.
  static const unsigned GPREven5[] = {
    ARM::R0, ARM::R2, ARM::R4,                   ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd5[] = {
    ARM::R1, ARM::R3, ARM::R5,                   ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R11, R9 is not available.
  static const unsigned GPREven6[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
  };
  static const unsigned GPROdd6[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
  };


  if (HintType == ARMRI::RegPairEven) {
    if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return std::make_pair(RC->allocation_order_begin(MF),
                            RC->allocation_order_end(MF));

    if (!hasFP(MF)) {
      if (!STI.isR9Reserved())
        return std::make_pair(GPREven1,
                              GPREven1 + (sizeof(GPREven1)/sizeof(unsigned)));
      else
        return std::make_pair(GPREven4,
                              GPREven4 + (sizeof(GPREven4)/sizeof(unsigned)));
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return std::make_pair(GPREven2,
                              GPREven2 + (sizeof(GPREven2)/sizeof(unsigned)));
      else
        return std::make_pair(GPREven5,
                              GPREven5 + (sizeof(GPREven5)/sizeof(unsigned)));
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return std::make_pair(GPREven3,
                              GPREven3 + (sizeof(GPREven3)/sizeof(unsigned)));
      else
        return std::make_pair(GPREven6,
                              GPREven6 + (sizeof(GPREven6)/sizeof(unsigned)));
    }
  } else if (HintType == ARMRI::RegPairOdd) {
    if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return std::make_pair(RC->allocation_order_begin(MF),
                            RC->allocation_order_end(MF));

    if (!hasFP(MF)) {
      if (!STI.isR9Reserved())
        return std::make_pair(GPROdd1,
                              GPROdd1 + (sizeof(GPROdd1)/sizeof(unsigned)));
      else
        return std::make_pair(GPROdd4,
                              GPROdd4 + (sizeof(GPROdd4)/sizeof(unsigned)));
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return std::make_pair(GPROdd2,
                              GPROdd2 + (sizeof(GPROdd2)/sizeof(unsigned)));
      else
        return std::make_pair(GPROdd5,
                              GPROdd5 + (sizeof(GPROdd5)/sizeof(unsigned)));
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return std::make_pair(GPROdd3,
                              GPROdd3 + (sizeof(GPROdd3)/sizeof(unsigned)));
      else
        return std::make_pair(GPROdd6,
                              GPROdd6 + (sizeof(GPROdd6)/sizeof(unsigned)));
    }
  }
  // No pairing hint: use the class's default order.
  return std::make_pair(RC->allocation_order_begin(MF),
                        RC->allocation_order_end(MF));
}
532
533/// ResolveRegAllocHint - Resolves the specified register allocation hint
534/// to a physical register. Returns the physical register if it is successful.
535unsigned
536ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
537                                         const MachineFunction &MF) const {
538  if (Reg == 0 || !isPhysicalRegister(Reg))
539    return 0;
540  if (Type == 0)
541    return Reg;
542  else if (Type == (unsigned)ARMRI::RegPairOdd)
543    // Odd register.
544    return getRegisterPairOdd(Reg, MF);
545  else if (Type == (unsigned)ARMRI::RegPairEven)
546    // Even register.
547    return getRegisterPairEven(Reg, MF);
548  return 0;
549}
550
551void
552ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
553                                        MachineFunction &MF) const {
554  MachineRegisterInfo *MRI = &MF.getRegInfo();
555  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
556  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
557       Hint.first == (unsigned)ARMRI::RegPairEven) &&
558      Hint.second && TargetRegisterInfo::isVirtualRegister(Hint.second)) {
559    // If 'Reg' is one of the even / odd register pair and it's now changed
560    // (e.g. coalesced) into a different register. The other register of the
561    // pair allocation hint must be updated to reflect the relationship
562    // change.
563    unsigned OtherReg = Hint.second;
564    Hint = MRI->getRegAllocationHint(OtherReg);
565    if (Hint.second == Reg)
566      // Make sure the pair has not already divorced.
567      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
568  }
569}
570
571/// hasFP - Return true if the specified function should have a dedicated frame
572/// pointer register.  This is true if the function has variable sized allocas
573/// or if frame pointer elimination is disabled.
574///
575bool ARMBaseRegisterInfo::hasFP(const MachineFunction &MF) const {
576  // Mac OS X requires FP not to be clobbered for backtracing purpose.
577  if (STI.isTargetDarwin())
578    return true;
579
580  const MachineFrameInfo *MFI = MF.getFrameInfo();
581  // Always eliminate non-leaf frame pointers.
582  return ((DisableFramePointerElim(MF) && MFI->hasCalls()) ||
583          needsStackRealignment(MF) ||
584          MFI->hasVarSizedObjects() ||
585          MFI->isFrameAddressTaken());
586}
587
588bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
589  const MachineFrameInfo *MFI = MF.getFrameInfo();
590  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
591
592  if (!EnableBasePointer)
593    return false;
594
595  if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
596    return true;
597
598  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
599  // negative range for ldr/str (255), and thumb1 is positive offsets only.
600  // It's going to be better to use the SP or Base Pointer instead. When there
601  // are variable sized objects, we can't reference off of the SP, so we
602  // reserve a Base Pointer.
603  if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
604    // Conservatively estimate whether the negative offset from the frame
605    // pointer will be sufficient to reach. If a function has a smallish
606    // frame, it's less likely to have lots of spills and callee saved
607    // space, so it's all more likely to be within range of the frame pointer.
608    // If it's wrong, the scavenger will still enable access to work, it just
609    // won't be optimal.
610    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
611      return false;
612    return true;
613  }
614
615  return false;
616}
617
618bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
619  const MachineFrameInfo *MFI = MF.getFrameInfo();
620  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
621  // We can't realign the stack if:
622  // 1. Dynamic stack realignment is explicitly disabled,
623  // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
624  // 3. There are VLAs in the function and the base pointer is disabled.
625  return (RealignStack && !AFI->isThumb1OnlyFunction() &&
626          (!MFI->hasVarSizedObjects() || EnableBasePointer));
627}
628
629bool ARMBaseRegisterInfo::
630needsStackRealignment(const MachineFunction &MF) const {
631  const MachineFrameInfo *MFI = MF.getFrameInfo();
632  const Function *F = MF.getFunction();
633  unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
634  bool requiresRealignment = ((MFI->getLocalFrameMaxAlign() > StackAlign) ||
635                               F->hasFnAttr(Attribute::StackAlignment));
636
637  return requiresRealignment && canRealignStack(MF);
638}
639
640bool ARMBaseRegisterInfo::
641cannotEliminateFrame(const MachineFunction &MF) const {
642  const MachineFrameInfo *MFI = MF.getFrameInfo();
643  if (DisableFramePointerElim(MF) && MFI->adjustsStack())
644    return true;
645  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
646    || needsStackRealignment(MF);
647}
648
649/// estimateStackSize - Estimate and return the size of the frame.
650static unsigned estimateStackSize(MachineFunction &MF) {
651  const MachineFrameInfo *FFI = MF.getFrameInfo();
652  int Offset = 0;
653  for (int i = FFI->getObjectIndexBegin(); i != 0; ++i) {
654    int FixedOff = -FFI->getObjectOffset(i);
655    if (FixedOff > Offset) Offset = FixedOff;
656  }
657  for (unsigned i = 0, e = FFI->getObjectIndexEnd(); i != e; ++i) {
658    if (FFI->isDeadObjectIndex(i))
659      continue;
660    Offset += FFI->getObjectSize(i);
661    unsigned Align = FFI->getObjectAlignment(i);
662    // Adjust to alignment boundary
663    Offset = (Offset+Align-1)/Align*Align;
664  }
665  return (unsigned)Offset;
666}
667
668/// estimateRSStackSizeLimit - Look at each instruction that references stack
669/// frames and return the stack size limit beyond which some of these
670/// instructions will require a scratch register during their expansion later.
unsigned
ARMBaseRegisterInfo::estimateRSStackSizeLimit(MachineFunction &MF) const {
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // Start from the widest stack-referencing immediate: 12 bits (4095).
  unsigned Limit = (1 << 12) - 1;
  for (MachineFunction::iterator BB = MF.begin(),E = MF.end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (!I->getOperand(i).isFI()) continue;

        // When using ADDri to get the address of a stack object, 255 is the
        // largest offset guaranteed to fit in the immediate offset.
        if (I->getOpcode() == ARM::ADDri) {
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        }

        // Otherwise check the addressing mode.
        switch (I->getDesc().TSFlags & ARMII::AddrModeMask) {
        case ARMII::AddrMode3:
        case ARMII::AddrModeT2_i8:
          // 8-bit immediate offset.
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode5:
        case ARMII::AddrModeT2_i8s4:
          // 8-bit immediate, scaled by 4.
          Limit = std::min(Limit, ((1U << 8) - 1) * 4);
          break;
        case ARMII::AddrModeT2_i12:
          // i12 supports only positive offset so these will be converted to
          // i8 opcodes. See llvm::rewriteT2FrameIndex.
          if (hasFP(MF) && AFI->hasStackFrame())
            Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode6:
          // Addressing mode 6 (load/store) instructions can't encode an
          // immediate offset for stack references.
          return 0;
        default:
          break;
        }
        break; // At most one FI per instruction
      }
    }
  }

  return Limit;
}
718
719static unsigned GetFunctionSizeInBytes(const MachineFunction &MF,
720                                       const ARMBaseInstrInfo &TII) {
721  unsigned FnSize = 0;
722  for (MachineFunction::const_iterator MBBI = MF.begin(), E = MF.end();
723       MBBI != E; ++MBBI) {
724    const MachineBasicBlock &MBB = *MBBI;
725    for (MachineBasicBlock::const_iterator I = MBB.begin(),E = MBB.end();
726         I != E; ++I)
727      FnSize += TII.GetInstSizeInBytes(I);
728  }
729  return FnSize;
730}
731
732void
733ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
734                                                       RegScavenger *RS) const {
735  // This tells PEI to spill the FP as if it is any other callee-save register
736  // to take advantage the eliminateFrameIndex machinery. This also ensures it
737  // is spilled in the order specified by getCalleeSavedRegs() to make it easier
738  // to combine multiple loads / stores.
739  bool CanEliminateFrame = true;
740  bool CS1Spilled = false;
741  bool LRSpilled = false;
742  unsigned NumGPRSpills = 0;
743  SmallVector<unsigned, 4> UnspilledCS1GPRs;
744  SmallVector<unsigned, 4> UnspilledCS2GPRs;
745  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
746  MachineFrameInfo *MFI = MF.getFrameInfo();
747
748  // Spill R4 if Thumb2 function requires stack realignment - it will be used as
749  // scratch register.
750  // FIXME: It will be better just to find spare register here.
751  if (needsStackRealignment(MF) &&
752      AFI->isThumb2Function())
753    MF.getRegInfo().setPhysRegUsed(ARM::R4);
754
755  // Spill LR if Thumb1 function uses variable length argument lists.
756  if (AFI->isThumb1OnlyFunction() && AFI->getVarArgsRegSaveSize() > 0)
757    MF.getRegInfo().setPhysRegUsed(ARM::LR);
758
759  // Spill the BasePtr if it's used.
760  if (hasBasePointer(MF))
761    MF.getRegInfo().setPhysRegUsed(BasePtr);
762
763  // Don't spill FP if the frame can be eliminated. This is determined
764  // by scanning the callee-save registers to see if any is used.
765  const unsigned *CSRegs = getCalleeSavedRegs();
766  for (unsigned i = 0; CSRegs[i]; ++i) {
767    unsigned Reg = CSRegs[i];
768    bool Spilled = false;
769    if (MF.getRegInfo().isPhysRegUsed(Reg)) {
770      AFI->setCSRegisterIsSpilled(Reg);
771      Spilled = true;
772      CanEliminateFrame = false;
773    } else {
774      // Check alias registers too.
775      for (const unsigned *Aliases = getAliasSet(Reg); *Aliases; ++Aliases) {
776        if (MF.getRegInfo().isPhysRegUsed(*Aliases)) {
777          Spilled = true;
778          CanEliminateFrame = false;
779        }
780      }
781    }
782
783    if (!ARM::GPRRegisterClass->contains(Reg))
784      continue;
785
786    if (Spilled) {
787      NumGPRSpills++;
788
789      if (!STI.isTargetDarwin()) {
790        if (Reg == ARM::LR)
791          LRSpilled = true;
792        CS1Spilled = true;
793        continue;
794      }
795
796      // Keep track if LR and any of R4, R5, R6, and R7 is spilled.
797      switch (Reg) {
798      case ARM::LR:
799        LRSpilled = true;
800        // Fallthrough
801      case ARM::R4:
802      case ARM::R5:
803      case ARM::R6:
804      case ARM::R7:
805        CS1Spilled = true;
806        break;
807      default:
808        break;
809      }
810    } else {
811      if (!STI.isTargetDarwin()) {
812        UnspilledCS1GPRs.push_back(Reg);
813        continue;
814      }
815
816      switch (Reg) {
817      case ARM::R4:
818      case ARM::R5:
819      case ARM::R6:
820      case ARM::R7:
821      case ARM::LR:
822        UnspilledCS1GPRs.push_back(Reg);
823        break;
824      default:
825        UnspilledCS2GPRs.push_back(Reg);
826        break;
827      }
828    }
829  }
830
831  bool ForceLRSpill = false;
832  if (!LRSpilled && AFI->isThumb1OnlyFunction()) {
833    unsigned FnSize = GetFunctionSizeInBytes(MF, TII);
834    // Force LR to be spilled if the Thumb function size is > 2048. This enables
835    // use of BL to implement far jump. If it turns out that it's not needed
836    // then the branch fix up path will undo it.
837    if (FnSize >= (1 << 11)) {
838      CanEliminateFrame = false;
839      ForceLRSpill = true;
840    }
841  }
842
843  // If any of the stack slot references may be out of range of an immediate
844  // offset, make sure a register (or a spill slot) is available for the
845  // register scavenger. Note that if we're indexing off the frame pointer, the
846  // effective stack size is 4 bytes larger since the FP points to the stack
847  // slot of the previous FP. Also, if we have variable sized objects in the
848  // function, stack slot references will often be negative, and some of
849  // our instructions are positive-offset only, so conservatively consider
850  // that case to want a spill slot (or register) as well. Similarly, if
851  // the function adjusts the stack pointer during execution and the
852  // adjustments aren't already part of our stack size estimate, our offset
853  // calculations may be off, so be conservative.
854  // FIXME: We could add logic to be more precise about negative offsets
855  //        and which instructions will need a scratch register for them. Is it
856  //        worth the effort and added fragility?
857  bool BigStack =
858    (RS &&
859     (estimateStackSize(MF) + ((hasFP(MF) && AFI->hasStackFrame()) ? 4:0) >=
860      estimateRSStackSizeLimit(MF)))
861    || MFI->hasVarSizedObjects()
862    || (MFI->adjustsStack() && !canSimplifyCallFramePseudos(MF));
863
864  bool ExtraCSSpill = false;
865  if (BigStack || !CanEliminateFrame || cannotEliminateFrame(MF)) {
866    AFI->setHasStackFrame(true);
867
868    // If LR is not spilled, but at least one of R4, R5, R6, and R7 is spilled.
869    // Spill LR as well so we can fold BX_RET to the registers restore (LDM).
870    if (!LRSpilled && CS1Spilled) {
871      MF.getRegInfo().setPhysRegUsed(ARM::LR);
872      AFI->setCSRegisterIsSpilled(ARM::LR);
873      NumGPRSpills++;
874      UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(),
875                                    UnspilledCS1GPRs.end(), (unsigned)ARM::LR));
876      ForceLRSpill = false;
877      ExtraCSSpill = true;
878    }
879
880    if (hasFP(MF)) {
881      MF.getRegInfo().setPhysRegUsed(FramePtr);
882      NumGPRSpills++;
883    }
884
885    // If stack and double are 8-byte aligned and we are spilling an odd number
886    // of GPRs. Spill one extra callee save GPR so we won't have to pad between
887    // the integer and double callee save areas.
888    unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
889    if (TargetAlign == 8 && (NumGPRSpills & 1)) {
890      if (CS1Spilled && !UnspilledCS1GPRs.empty()) {
891        for (unsigned i = 0, e = UnspilledCS1GPRs.size(); i != e; ++i) {
892          unsigned Reg = UnspilledCS1GPRs[i];
893          // Don't spill high register if the function is thumb1
894          if (!AFI->isThumb1OnlyFunction() ||
895              isARMLowRegister(Reg) || Reg == ARM::LR) {
896            MF.getRegInfo().setPhysRegUsed(Reg);
897            AFI->setCSRegisterIsSpilled(Reg);
898            if (!isReservedReg(MF, Reg))
899              ExtraCSSpill = true;
900            break;
901          }
902        }
903      } else if (!UnspilledCS2GPRs.empty() &&
904                 !AFI->isThumb1OnlyFunction()) {
905        unsigned Reg = UnspilledCS2GPRs.front();
906        MF.getRegInfo().setPhysRegUsed(Reg);
907        AFI->setCSRegisterIsSpilled(Reg);
908        if (!isReservedReg(MF, Reg))
909          ExtraCSSpill = true;
910      }
911    }
912
913    // Estimate if we might need to scavenge a register at some point in order
914    // to materialize a stack offset. If so, either spill one additional
915    // callee-saved register or reserve a special spill slot to facilitate
916    // register scavenging. Thumb1 needs a spill slot for stack pointer
917    // adjustments also, even when the frame itself is small.
918    if (BigStack && !ExtraCSSpill) {
919      // If any non-reserved CS register isn't spilled, just spill one or two
920      // extra. That should take care of it!
921      unsigned NumExtras = TargetAlign / 4;
922      SmallVector<unsigned, 2> Extras;
923      while (NumExtras && !UnspilledCS1GPRs.empty()) {
924        unsigned Reg = UnspilledCS1GPRs.back();
925        UnspilledCS1GPRs.pop_back();
926        if (!isReservedReg(MF, Reg) &&
927            (!AFI->isThumb1OnlyFunction() || isARMLowRegister(Reg) ||
928             Reg == ARM::LR)) {
929          Extras.push_back(Reg);
930          NumExtras--;
931        }
932      }
933      // For non-Thumb1 functions, also check for hi-reg CS registers
934      if (!AFI->isThumb1OnlyFunction()) {
935        while (NumExtras && !UnspilledCS2GPRs.empty()) {
936          unsigned Reg = UnspilledCS2GPRs.back();
937          UnspilledCS2GPRs.pop_back();
938          if (!isReservedReg(MF, Reg)) {
939            Extras.push_back(Reg);
940            NumExtras--;
941          }
942        }
943      }
944      if (Extras.size() && NumExtras == 0) {
945        for (unsigned i = 0, e = Extras.size(); i != e; ++i) {
946          MF.getRegInfo().setPhysRegUsed(Extras[i]);
947          AFI->setCSRegisterIsSpilled(Extras[i]);
948        }
949      } else if (!AFI->isThumb1OnlyFunction()) {
950        // note: Thumb1 functions spill to R12, not the stack.  Reserve a slot
951        // closest to SP or frame pointer.
952        const TargetRegisterClass *RC = ARM::GPRRegisterClass;
953        RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
954                                                           RC->getAlignment(),
955                                                           false));
956      }
957    }
958  }
959
960  if (ForceLRSpill) {
961    MF.getRegInfo().setPhysRegUsed(ARM::LR);
962    AFI->setCSRegisterIsSpilled(ARM::LR);
963    AFI->setLRIsSpilledForFarJump(true);
964  }
965}
966
/// getRARegister - Return the register holding the return address: LR on ARM.
unsigned ARMBaseRegisterInfo::getRARegister() const {
  return ARM::LR;
}
970
971unsigned
972ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
973  if (hasFP(MF))
974    return FramePtr;
975  return ARM::SP;
976}
977
// Provide a base+offset reference to an FI slot for debug info. It's the
// same as what we use for resolving the code-gen references for now.
// FIXME: This can go wrong when references are SP-relative and simple call
//        frames aren't used.
int
ARMBaseRegisterInfo::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  // Delegate to the full resolver with no SP adjustment in flight.
  return ResolveFrameIndexReference(MF, FI, FrameReg, 0);
}
987
/// ResolveFrameIndexReference - Resolve frame index FI to a base register
/// (returned in FrameReg) plus a byte offset (the return value). SPAdj is
/// any outstanding SP adjustment at the reference point (e.g. inside a call
/// sequence). The base register may be SP, FP, or the dedicated base pointer
/// depending on the frame layout.
int
ARMBaseRegisterInfo::ResolveFrameIndexReference(const MachineFunction &MF,
                                                int FI,
                                                unsigned &FrameReg,
                                                int SPAdj) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // SP-relative offset of the slot from the bottom of the frame.
  int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
  // Equivalent offset if addressed off the frame pointer instead.
  int FPOffset = Offset - AFI->getFramePtrSpillOffset();
  bool isFixed = MFI->isFixedObjectIndex(FI);

  FrameReg = ARM::SP;
  Offset += SPAdj;
  // Callee-saved spill areas have their own area-relative addressing.
  if (AFI->isGPRCalleeSavedArea1Frame(FI))
    return Offset - AFI->getGPRCalleeSavedArea1Offset();
  else if (AFI->isGPRCalleeSavedArea2Frame(FI))
    return Offset - AFI->getGPRCalleeSavedArea2Offset();
  else if (AFI->isDPRCalleeSavedAreaFrame(FI))
    return Offset - AFI->getDPRCalleeSavedAreaOffset();

  // When dynamically realigning the stack, use the frame pointer for
  // parameters, and the stack/base pointer for locals.
  if (needsStackRealignment(MF)) {
    assert (hasFP(MF) && "dynamic stack realignment without a FP!");
    if (isFixed) {
      FrameReg = getFrameRegister(MF);
      Offset = FPOffset;
    } else if (MFI->hasVarSizedObjects()) {
      // Locals can't be SP-relative with VLAs present; use the base pointer.
      assert(hasBasePointer(MF) &&
             "VLAs and dynamic stack alignment, but missing base pointer!");
      FrameReg = BasePtr;
    }
    return Offset;
  }

  // If there is a frame pointer, use it when we can.
  if (hasFP(MF) && AFI->hasStackFrame()) {
    // Use frame pointer to reference fixed objects. Use it for locals if
    // there are VLAs (and thus the SP isn't reliable as a base).
    if (isFixed || (MFI->hasVarSizedObjects() && !hasBasePointer(MF))) {
      FrameReg = getFrameRegister(MF);
      return FPOffset;
    } else if (MFI->hasVarSizedObjects()) {
      assert(hasBasePointer(MF) && "missing base pointer!");
      // Use the base register since we have it.
      FrameReg = BasePtr;
    } else if (AFI->isThumb2Function()) {
      // In Thumb2 mode, the negative offset is very limited. Try to avoid
      // out of range references.
      if (FPOffset >= -255 && FPOffset < 0) {
        FrameReg = getFrameRegister(MF);
        return FPOffset;
      }
    } else if (Offset > (FPOffset < 0 ? -FPOffset : FPOffset)) {
      // Otherwise, use SP or FP, whichever is closer to the stack slot.
      FrameReg = getFrameRegister(MF);
      return FPOffset;
    }
  }
  // Use the base pointer if we have one.
  if (hasBasePointer(MF))
    FrameReg = BasePtr;
  return Offset;
}
1052
1053int
1054ARMBaseRegisterInfo::getFrameIndexOffset(const MachineFunction &MF,
1055                                         int FI) const {
1056  unsigned FrameReg;
1057  return getFrameIndexReference(MF, FI, FrameReg);
1058}
1059
/// getEHExceptionRegister - Not implemented for ARM; always aborts.
unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0; // unreachable; silences missing-return warnings
}
1064
/// getEHHandlerRegister - Not implemented for ARM; always aborts.
unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0; // unreachable; silences missing-return warnings
}
1069
/// getDwarfRegNum - Map a target register to its DWARF register number.
/// Note: the isEH flag is ignored; both tables use flavour 0 here.
int ARMBaseRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
  return ARMGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
}
1073
/// getRegisterPairEven - Given the odd member of an (even, odd) register
/// pair, return the even partner, or 0 if the pair can't be used (one of
/// the registers is special or reserved in this function).
unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
                                              const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R1:
    return ARM::R0;
  case ARM::R3:
    return ARM::R2;
  case ARM::R5:
    return ARM::R4;
  case ARM::R7:
    // R7 may be the frame pointer; R6 may be reserved (e.g. base pointer).
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R6;
  case ARM::R9:
    // R9 is reserved on some subtargets (e.g. Darwin).
    return isReservedReg(MF, ARM::R9)  ? 0 :ARM::R8;
  case ARM::R11:
    // R11 may be the frame pointer.
    return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;

  // VFP single-precision pairs (S registers).
  case ARM::S1:
    return ARM::S0;
  case ARM::S3:
    return ARM::S2;
  case ARM::S5:
    return ARM::S4;
  case ARM::S7:
    return ARM::S6;
  case ARM::S9:
    return ARM::S8;
  case ARM::S11:
    return ARM::S10;
  case ARM::S13:
    return ARM::S12;
  case ARM::S15:
    return ARM::S14;
  case ARM::S17:
    return ARM::S16;
  case ARM::S19:
    return ARM::S18;
  case ARM::S21:
    return ARM::S20;
  case ARM::S23:
    return ARM::S22;
  case ARM::S25:
    return ARM::S24;
  case ARM::S27:
    return ARM::S26;
  case ARM::S29:
    return ARM::S28;
  case ARM::S31:
    return ARM::S30;

  // VFP double-precision pairs (D registers).
  case ARM::D1:
    return ARM::D0;
  case ARM::D3:
    return ARM::D2;
  case ARM::D5:
    return ARM::D4;
  case ARM::D7:
    return ARM::D6;
  case ARM::D9:
    return ARM::D8;
  case ARM::D11:
    return ARM::D10;
  case ARM::D13:
    return ARM::D12;
  case ARM::D15:
    return ARM::D14;
  case ARM::D17:
    return ARM::D16;
  case ARM::D19:
    return ARM::D18;
  case ARM::D21:
    return ARM::D20;
  case ARM::D23:
    return ARM::D22;
  case ARM::D25:
    return ARM::D24;
  case ARM::D27:
    return ARM::D26;
  case ARM::D29:
    return ARM::D28;
  case ARM::D31:
    return ARM::D30;
  }

  // Not a pairable register.
  return 0;
}
1163
/// getRegisterPairOdd - Given the even member of an (even, odd) register
/// pair, return the odd partner, or 0 if the pair can't be used (one of
/// the registers is special or reserved in this function).
unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
                                             const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R0:
    return ARM::R1;
  case ARM::R2:
    return ARM::R3;
  case ARM::R4:
    return ARM::R5;
  case ARM::R6:
    // R7 may be the frame pointer; R6 may be reserved (e.g. base pointer).
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R7;
  case ARM::R8:
    // R9 is reserved on some subtargets (e.g. Darwin).
    return isReservedReg(MF, ARM::R9)  ? 0 :ARM::R9;
  case ARM::R10:
    // R11 may be the frame pointer.
    return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;

  // VFP single-precision pairs (S registers).
  case ARM::S0:
    return ARM::S1;
  case ARM::S2:
    return ARM::S3;
  case ARM::S4:
    return ARM::S5;
  case ARM::S6:
    return ARM::S7;
  case ARM::S8:
    return ARM::S9;
  case ARM::S10:
    return ARM::S11;
  case ARM::S12:
    return ARM::S13;
  case ARM::S14:
    return ARM::S15;
  case ARM::S16:
    return ARM::S17;
  case ARM::S18:
    return ARM::S19;
  case ARM::S20:
    return ARM::S21;
  case ARM::S22:
    return ARM::S23;
  case ARM::S24:
    return ARM::S25;
  case ARM::S26:
    return ARM::S27;
  case ARM::S28:
    return ARM::S29;
  case ARM::S30:
    return ARM::S31;

  // VFP double-precision pairs (D registers).
  case ARM::D0:
    return ARM::D1;
  case ARM::D2:
    return ARM::D3;
  case ARM::D4:
    return ARM::D5;
  case ARM::D6:
    return ARM::D7;
  case ARM::D8:
    return ARM::D9;
  case ARM::D10:
    return ARM::D11;
  case ARM::D12:
    return ARM::D13;
  case ARM::D14:
    return ARM::D15;
  case ARM::D16:
    return ARM::D17;
  case ARM::D18:
    return ARM::D19;
  case ARM::D20:
    return ARM::D21;
  case ARM::D22:
    return ARM::D23;
  case ARM::D24:
    return ARM::D25;
  case ARM::D26:
    return ARM::D27;
  case ARM::D28:
    return ARM::D29;
  case ARM::D30:
    return ARM::D31;
  }

  // Not a pairable register.
  return 0;
}
1253
1254/// emitLoadConstPool - Emits a load from constpool to materialize the
1255/// specified immediate.
1256void ARMBaseRegisterInfo::
1257emitLoadConstPool(MachineBasicBlock &MBB,
1258                  MachineBasicBlock::iterator &MBBI,
1259                  DebugLoc dl,
1260                  unsigned DestReg, unsigned SubIdx, int Val,
1261                  ARMCC::CondCodes Pred,
1262                  unsigned PredReg) const {
1263  MachineFunction &MF = *MBB.getParent();
1264  MachineConstantPool *ConstantPool = MF.getConstantPool();
1265  const Constant *C =
1266        ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
1267  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
1268
1269  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
1270    .addReg(DestReg, getDefRegState(true), SubIdx)
1271    .addConstantPoolIndex(Idx)
1272    .addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
1273}
1274
/// requiresRegisterScavenging - ARM always runs the register scavenger for
/// frame-index elimination.
bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}
1279
/// requiresFrameIndexScavenging - Frame-index elimination may need a
/// scavenged scratch register on ARM, so always request it.
bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}
1284
/// requiresVirtualBaseRegisters - Virtual base registers for stack access
/// are gated on the -enable-local-stack-alloc flag (default: on).
bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return EnableLocalStackAlloc;
}
1289
1290// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
1291// not required, we reserve argument space for call sites in the function
1292// immediately on entry to the current function. This eliminates the need for
1293// add/sub sp brackets around call sites. Returns true if the call frame is
1294// included as part of the stack frame.
1295bool ARMBaseRegisterInfo::
1296hasReservedCallFrame(const MachineFunction &MF) const {
1297  const MachineFrameInfo *FFI = MF.getFrameInfo();
1298  unsigned CFSize = FFI->getMaxCallFrameSize();
1299  // It's not always a good idea to include the call frame as part of the
1300  // stack frame. ARM (especially Thumb) has small immediate offset to
1301  // address the stack frame. So a large call frame can cause poor codegen
1302  // and may even makes it impossible to scavenge a register.
1303  if (CFSize >= ((1 << 12) - 1) / 2)  // Half of imm12
1304    return false;
1305
1306  return !MF.getFrameInfo()->hasVarSizedObjects();
1307}
1308
// canSimplifyCallFramePseudos - If there is a reserved call frame, the
// call frame pseudos can be simplified. Unlike most targets, having a FP
// is not sufficient here since we still may reference some objects via SP
// even when FP is available in Thumb2 mode.
bool ARMBaseRegisterInfo::
canSimplifyCallFramePseudos(const MachineFunction &MF) const {
  // With VLAs the dynamic SP adjustments are folded elsewhere, so the
  // pseudos are simplifiable in that case too.
  return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
}
1317
1318static void
1319emitSPUpdate(bool isARM,
1320             MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
1321             DebugLoc dl, const ARMBaseInstrInfo &TII,
1322             int NumBytes,
1323             ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
1324  if (isARM)
1325    emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
1326                            Pred, PredReg, TII);
1327  else
1328    emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
1329                           Pred, PredReg, TII);
1330}
1331
1332
/// eliminateCallFramePseudoInstr - Replace ADJCALLSTACKDOWN/UP pseudos with
/// real SP adjustments when the call frame is not statically reserved; when
/// it is reserved, the pseudos are simply deleted.
void ARMBaseRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If we have alloca, convert as follows:
    // ADJCALLSTACKDOWN -> sub, sp, sp, amount
    // ADJCALLSTACKUP   -> add, sp, sp, amount
    MachineInstr *Old = I;
    DebugLoc dl = Old->getDebugLoc();
    unsigned Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly.  To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;

      ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
      assert(!AFI->isThumb1OnlyFunction() &&
             "This eliminateCallFramePseudoInstr does not support Thumb1!");
      bool isARM = !AFI->isThumbFunction();

      // Replace the pseudo instruction with a new instruction...
      unsigned Opc = Old->getOpcode();
      int PIdx = Old->findFirstPredOperandIdx();
      // Preserve any predicate carried by the pseudo; default to AL.
      ARMCC::CondCodes Pred = (PIdx == -1)
        ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
      if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
        // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
        unsigned PredReg = Old->getOperand(2).getReg();
        emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
      } else {
        // Note: PredReg is operand 3 for ADJCALLSTACKUP.
        unsigned PredReg = Old->getOperand(3).getReg();
        assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
        emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
      }
    }
  }
  // The pseudo is always removed, whether or not SP code was emitted.
  MBB.erase(I);
}
1374
1375int64_t ARMBaseRegisterInfo::
1376getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
1377  const TargetInstrDesc &Desc = MI->getDesc();
1378  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
1379  int64_t InstrOffs = 0;;
1380  int Scale = 1;
1381  unsigned ImmIdx = 0;
1382  switch (AddrMode) {
1383  case ARMII::AddrModeT2_i8:
1384  case ARMII::AddrModeT2_i12:
1385    // i8 supports only negative, and i12 supports only positive, so
1386    // based on Offset sign, consider the appropriate instruction
1387    InstrOffs = MI->getOperand(Idx+1).getImm();
1388    Scale = 1;
1389    break;
1390  case ARMII::AddrMode5: {
1391    // VFP address mode.
1392    const MachineOperand &OffOp = MI->getOperand(Idx+1);
1393    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
1394    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
1395      InstrOffs = -InstrOffs;
1396    Scale = 4;
1397    break;
1398  }
1399  case ARMII::AddrMode2: {
1400    ImmIdx = Idx+2;
1401    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
1402    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1403      InstrOffs = -InstrOffs;
1404    break;
1405  }
1406  case ARMII::AddrMode3: {
1407    ImmIdx = Idx+2;
1408    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
1409    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1410      InstrOffs = -InstrOffs;
1411    break;
1412  }
1413  case ARMII::AddrModeT1_s: {
1414    ImmIdx = Idx+1;
1415    InstrOffs = MI->getOperand(ImmIdx).getImm();
1416    Scale = 4;
1417    break;
1418  }
1419  default:
1420    llvm_unreachable("Unsupported addressing mode!");
1421    break;
1422  }
1423
1424  return InstrOffs * Scale;
1425}
1426
/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  // Sanity-check that MI actually carries a frame-index operand.
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDR: case ARM::LDRH: case ARM::LDRB:
  case ARM::STR: case ARM::STRH: case ARM::STRB:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    // -arm-force-base-reg-alloc overrides the heuristics below (testing aid).
    if (ForceAllBaseRegAlloc)
      return true;
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer,
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
  // NOTE(review): this disjunction is logically just !isThumb1OnlyFunction()
  // (Thumb1-only implies Thumb) — confirm before simplifying.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relating to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset = -Offset;
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
  if (hasFP(MF) &&
      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, Offset))
    return false;

  // The offset likely isn't legal, we want to allocate a virtual base register.
  return true;
}
1511
1512/// materializeFrameBaseRegister - Insert defining instruction(s) for
1513/// BaseReg to be a pointer to FrameIdx before insertion point I.
1514void ARMBaseRegisterInfo::
1515materializeFrameBaseRegister(MachineBasicBlock::iterator I, unsigned BaseReg,
1516                             int FrameIdx, int64_t Offset) const {
1517  ARMFunctionInfo *AFI =
1518    I->getParent()->getParent()->getInfo<ARMFunctionInfo>();
1519  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
1520    (AFI->isThumb1OnlyFunction() ? ARM::tADDrSPi : ARM::t2ADDri);
1521
1522  MachineInstrBuilder MIB =
1523    BuildMI(*I->getParent(), I, I->getDebugLoc(), TII.get(ADDriOpc), BaseReg)
1524    .addFrameIndex(FrameIdx).addImm(Offset);
1525  if (!AFI->isThumb1OnlyFunction())
1526    AddDefaultCC(AddDefaultPred(MIB));
1527}
1528
1529void
1530ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
1531                                       unsigned BaseReg, int64_t Offset) const {
1532  MachineInstr &MI = *I;
1533  MachineBasicBlock &MBB = *MI.getParent();
1534  MachineFunction &MF = *MBB.getParent();
1535  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1536  int Off = Offset; // ARM doesn't need the general 64-bit offsets
1537  unsigned i = 0;
1538
1539  assert(!AFI->isThumb1OnlyFunction() &&
1540         "This resolveFrameIndex does not support Thumb1!");
1541
1542  while (!MI.getOperand(i).isFI()) {
1543    ++i;
1544    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
1545  }
1546  bool Done = false;
1547  if (!AFI->isThumbFunction())
1548    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
1549  else {
1550    assert(AFI->isThumb2Function());
1551    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
1552  }
1553  assert (Done && "Unable to resolve frame index!");
1554}
1555
/// isFrameOffsetLegal - Return true if Offset (plus whatever immediate the
/// instruction already encodes) fits in MI's addressing-mode immediate
/// field, including the mode's scale and sign constraints.
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             int64_t Offset) const {
  const TargetInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  // Find the frame-index operand; getFrameIndexInstrOffset needs its index.
  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  // Immediate-field parameters for the remaining modes.
  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode2:
    // ARM word/byte load/store: 12-bit immediate.
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    // ARM halfword/signed-byte load/store: 8-bit immediate.
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    // Thumb1 SP-relative: unsigned 5-bit immediate scaled by 4.
    NumBits = 5;
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
    break;
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  // Check the (absolute) offset against the field's scaled range.
  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}
1623
/// eliminateFrameIndex - Replace MI's abstract frame-index operand with a
/// concrete base register + offset. Folds as much of the offset into the
/// instruction's immediate field as possible; any residue is materialized
/// into a scratch virtual register.
void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, RegScavenger *RS) const {
  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");

  // Locate the frame-index operand.
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned FrameReg;

  // Pick the base register (SP/FP/base pointer) and compute the offset.
  int Offset = ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(i).  ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(i+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  // Carry over any predicate on MI to the add-immediate sequence below.
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
  else {
    // Compute FrameReg + Offset into a fresh virtual register and use it
    // as the (kill) base operand; the scavenger assigns it a physreg later.
    ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
  }
}
1692
1693/// Move iterator past the next bunch of callee save load / store ops for
1694/// the particular spill area (1: integer area 1, 2: integer area 2,
1695/// 3: fp area, 0: don't care).
1696static void movePastCSLoadStoreOps(MachineBasicBlock &MBB,
1697                                   MachineBasicBlock::iterator &MBBI,
1698                                   int Opc1, int Opc2, unsigned Area,
1699                                   const ARMSubtarget &STI) {
1700  while (MBBI != MBB.end() &&
1701         ((MBBI->getOpcode() == Opc1) || (MBBI->getOpcode() == Opc2)) &&
1702         MBBI->getOperand(1).isFI()) {
1703    if (Area != 0) {
1704      bool Done = false;
1705      unsigned Category = 0;
1706      switch (MBBI->getOperand(0).getReg()) {
1707      case ARM::R4:  case ARM::R5:  case ARM::R6: case ARM::R7:
1708      case ARM::LR:
1709        Category = 1;
1710        break;
1711      case ARM::R8:  case ARM::R9:  case ARM::R10: case ARM::R11:
1712        Category = STI.isTargetDarwin() ? 2 : 1;
1713        break;
1714      case ARM::D8:  case ARM::D9:  case ARM::D10: case ARM::D11:
1715      case ARM::D12: case ARM::D13: case ARM::D14: case ARM::D15:
1716        Category = 3;
1717        break;
1718      default:
1719        Done = true;
1720        break;
1721      }
1722      if (Done || Category != Area)
1723        break;
1724    }
1725
1726    ++MBBI;
1727  }
1728}
1729
/// emitPrologue - Insert prologue code into the function's entry block:
/// allocate the vararg register save area, adjust SP for the (up to three)
/// callee-save spill areas, establish the frame pointer, allocate locals,
/// and perform dynamic stack realignment / base pointer setup when needed.
/// Thumb1 functions are handled by a different implementation.
void ARMBaseRegisterInfo::
emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo  *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This emitPrologue does not support Thumb1!");
  bool isARM = !AFI->isThumbFunction();
  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  unsigned NumBytes = MFI->getStackSize();
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Determine the sizes of each callee-save spill areas and record which frame
  // belongs to which callee-save spill areas.
  unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
  int FramePtrSpillFI = 0;

  // Allocate the vararg register save area. This is not counted in NumBytes.
  if (VARegSaveSize)
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, -VARegSaveSize);

  // No callee saves to push: just allocate the local frame (if any) and done.
  if (!AFI->hasStackFrame()) {
    if (NumBytes != 0)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
    return;
  }

  // Classify each callee-saved register into its spill area, tallying area
  // sizes and remembering which frame index holds the frame pointer save.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    int FI = CSI[i].getFrameIdx();
    switch (Reg) {
    case ARM::R4:
    case ARM::R5:
    case ARM::R6:
    case ARM::R7:
    case ARM::LR:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      AFI->addGPRCalleeSavedArea1Frame(FI);
      GPRCS1Size += 4;
      break;
    case ARM::R8:
    case ARM::R9:
    case ARM::R10:
    case ARM::R11:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      // R8-R11 get their own spill area (2) only on Darwin.
      if (STI.isTargetDarwin()) {
        AFI->addGPRCalleeSavedArea2Frame(FI);
        GPRCS2Size += 4;
      } else {
        AFI->addGPRCalleeSavedArea1Frame(FI);
        GPRCS1Size += 4;
      }
      break;
    default:
      // Everything else is a D-register spill (8 bytes each).
      AFI->addDPRCalleeSavedAreaFrame(FI);
      DPRCSSize += 8;
    }
  }

  // Build the new SUBri to adjust SP for integer callee-save spill area 1.
  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS1Size);
  movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 1, STI);

  // Set FP to point to the stack slot that contains the previous FP.
  // For Darwin, FP is R7, which has now been stored in spill area 1.
  // Otherwise, if this is not Darwin, all the callee-saved registers go
  // into spill area 1, including the FP in R11.  In either case, it is
  // now safe to emit this assignment.
  bool HasFP = hasFP(MF);
  if (HasFP) {
    unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri : ARM::t2ADDri;
    MachineInstrBuilder MIB =
      BuildMI(MBB, MBBI, dl, TII.get(ADDriOpc), FramePtr)
      .addFrameIndex(FramePtrSpillFI).addImm(0);
    AddDefaultCC(AddDefaultPred(MIB));
  }

  // Build the new SUBri to adjust SP for integer callee-save spill area 2.
  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS2Size);

  // Build the new SUBri to adjust SP for FP callee-save spill area.
  movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 2, STI);
  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRCSSize);

  // Determine starting offsets of spill areas.
  unsigned DPRCSOffset  = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
  unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
  unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
  if (HasFP)
    AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) +
                                NumBytes);
  AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
  AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
  AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);

  // Move past the D-register stores, then allocate the local frame.
  movePastCSLoadStoreOps(MBB, MBBI, ARM::VSTRD, 0, 3, STI);
  NumBytes = DPRCSOffset;
  if (NumBytes) {
    // Adjust SP after all the callee-save spills.
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
    if (HasFP)
      AFI->setShouldRestoreSPFromFP(true);
  }

  if (STI.isTargetELF() && hasFP(MF)) {
    MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
                             AFI->getFramePtrSpillOffset());
    AFI->setShouldRestoreSPFromFP(true);
  }

  AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
  AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
  AFI->setDPRCalleeSavedAreaSize(DPRCSSize);

  // If we need dynamic stack realignment, do it here. Be paranoid and make
  // sure if we also have VLAs, we have a base pointer for frame access.
  if (needsStackRealignment(MF)) {
    unsigned MaxAlign = MFI->getMaxAlignment();
    assert (!AFI->isThumb1OnlyFunction());
    if (!AFI->isThumbFunction()) {
      // Emit bic sp, sp, MaxAlign
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
                                          TII.get(ARM::BICri), ARM::SP)
                                  .addReg(ARM::SP, RegState::Kill)
                                  .addImm(MaxAlign-1)));
    } else {
      // We cannot use sp as source/dest register here, thus we're emitting the
      // following sequence:
      // mov r4, sp
      // bic r4, r4, MaxAlign
      // mov sp, r4
      // FIXME: It will be better just to find spare register here.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2tgpr), ARM::R4)
        .addReg(ARM::SP, RegState::Kill);
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
                                          TII.get(ARM::t2BICri), ARM::R4)
                                  .addReg(ARM::R4, RegState::Kill)
                                  .addImm(MaxAlign-1)));
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
        .addReg(ARM::R4, RegState::Kill);
    }

    AFI->setShouldRestoreSPFromFP(true);
  }

  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  if (hasBasePointer(MF)) {
    if (isARM)
      BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), BasePtr)
        .addReg(ARM::SP)
        .addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
    else
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), BasePtr)
        .addReg(ARM::SP);
  }

  // If the frame has variable sized objects then the epilogue must restore
  // the sp from fp.
  if (!AFI->shouldRestoreSPFromFP() && MFI->hasVarSizedObjects())
    AFI->setShouldRestoreSPFromFP(true);
}
1898
/// isCalleeSavedRegister - Return true if Reg appears in the zero-terminated
/// callee-saved register list CSRegs.
static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
  for (const unsigned *R = CSRegs; *R != 0; ++R)
    if (*R == Reg)
      return true;
  return false;
}
1905
1906static bool isCSRestore(MachineInstr *MI,
1907                        const ARMBaseInstrInfo &TII,
1908                        const unsigned *CSRegs) {
1909  return ((MI->getOpcode() == (int)ARM::VLDRD ||
1910           MI->getOpcode() == (int)ARM::LDR ||
1911           MI->getOpcode() == (int)ARM::t2LDRi12) &&
1912          MI->getOperand(1).isFI() &&
1913          isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs));
1914}
1915
/// emitEpilogue - Insert epilogue code before the return instruction in MBB:
/// deallocate the local frame (restoring SP from FP when required), walk the
/// callee-save reload sequence while re-adjusting SP for each spill area,
/// expand TCRETURN pseudos into actual tail-call jumps, and deallocate the
/// vararg register save area.  Thumb1 is handled elsewhere.
void ARMBaseRegisterInfo::
emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  assert(MBBI->getDesc().isReturn() &&
         "Can only insert epilog into returning blocks");
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc dl = MBBI->getDebugLoc();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This emitEpilogue does not support Thumb1!");
  bool isARM = !AFI->isThumbFunction();

  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  int NumBytes = (int)MFI->getStackSize();

  if (!AFI->hasStackFrame()) {
    // No callee saves were pushed; just deallocate the local frame.
    if (NumBytes != 0)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
  } else {
    // Unwind MBBI to point to first LDR / VLDRD.
    const unsigned *CSRegs = getCalleeSavedRegs();
    if (MBBI != MBB.begin()) {
      do
        --MBBI;
      while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
      if (!isCSRestore(MBBI, TII, CSRegs))
        ++MBBI;
    }

    // Move SP to start of FP callee save spill area.
    NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
                 AFI->getGPRCalleeSavedArea2Size() +
                 AFI->getDPRCalleeSavedAreaSize());

    // Reset SP based on frame pointer only if the stack frame extends beyond
    // frame pointer stack slot or target is ELF and the function has FP.
    if (AFI->shouldRestoreSPFromFP()) {
      NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
      if (NumBytes) {
        // SP = FP - NumBytes, materialized with an add-immediate sequence.
        if (isARM)
          emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
                                  ARMCC::AL, 0, TII);
        else
          emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
                                 ARMCC::AL, 0, TII);
      } else {
        // Thumb2 or ARM.
        if (isARM)
          BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
            .addReg(FramePtr).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
        else
          BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP)
            .addReg(FramePtr);
      }
    } else if (NumBytes)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);

    // Move SP to start of integer callee save spill area 2.
    movePastCSLoadStoreOps(MBB, MBBI, ARM::VLDRD, 0, 3, STI);
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getDPRCalleeSavedAreaSize());

    // Move SP to start of integer callee save spill area 1.
    movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 2, STI);
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea2Size());

    // Move SP to SP upon entry to the function.
    movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 1, STI);
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea1Size());
  }

  // Expand the TCRETURN* pseudo into the real tail-call branch, carrying
  // over the call target and any implicit operands, then delete the pseudo.
  if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNdiND ||
      RetOpcode == ARM::TCRETURNri || RetOpcode == ARM::TCRETURNriND) {
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);

    // Jump to label or value in register.
    if (RetOpcode == ARM::TCRETURNdi) {
      BuildMI(MBB, MBBI, dl,
            TII.get(STI.isThumb() ? ARM::TAILJMPdt : ARM::TAILJMPd)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == ARM::TCRETURNdiND) {
      BuildMI(MBB, MBBI, dl,
            TII.get(STI.isThumb() ? ARM::TAILJMPdNDt : ARM::TAILJMPdND)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == ARM::TCRETURNri) {
      BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else if (RetOpcode == ARM::TCRETURNriND) {
      BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPrND)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    // Copy the pseudo's remaining (implicit) operands onto the new branch.
    MachineInstr *NewMI = prior(MBBI);
    for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
      NewMI->addOperand(MBBI->getOperand(i));

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  }

  // Deallocate the vararg register save area pushed in the prologue.
  if (VARegSaveSize)
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, VARegSaveSize);
}
2023
2024#include "ARMGenRegisterInfo.inc"
2025