ARMBaseRegisterInfo.cpp revision 1ab3f16f06698596716593a30545799688acccd7
1//===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the base ARM implementation of TargetRegisterInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "ARM.h"
15#include "ARMAddressingModes.h"
16#include "ARMBaseInstrInfo.h"
17#include "ARMBaseRegisterInfo.h"
18#include "ARMInstrInfo.h"
19#include "ARMMachineFunctionInfo.h"
20#include "ARMSubtarget.h"
21#include "llvm/Constants.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/Function.h"
24#include "llvm/LLVMContext.h"
25#include "llvm/CodeGen/MachineConstantPool.h"
26#include "llvm/CodeGen/MachineFrameInfo.h"
27#include "llvm/CodeGen/MachineFunction.h"
28#include "llvm/CodeGen/MachineInstrBuilder.h"
29#include "llvm/CodeGen/MachineLocation.h"
30#include "llvm/CodeGen/MachineRegisterInfo.h"
31#include "llvm/CodeGen/RegisterScavenging.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Support/raw_ostream.h"
35#include "llvm/Target/TargetFrameInfo.h"
36#include "llvm/Target/TargetMachine.h"
37#include "llvm/Target/TargetOptions.h"
38#include "llvm/ADT/BitVector.h"
39#include "llvm/ADT/SmallVector.h"
40#include "llvm/Support/CommandLine.h"
41
42namespace llvm {
43cl::opt<bool>
44ReuseFrameIndexVals("arm-reuse-frame-index-vals", cl::Hidden, cl::init(false),
45          cl::desc("Reuse repeated frame index values"));
46static cl::opt<bool>
47ForceAllBaseRegAlloc("arm-force-base-reg-alloc", cl::Hidden, cl::init(false),
48          cl::desc("Force use of virtual base registers for stack load/store"));
49static cl::opt<bool>
50EnableLocalStackAlloc("enable-local-stack-alloc", cl::init(true), cl::Hidden,
51          cl::desc("Enable pre-regalloc stack frame index allocation"));
52}
53
54using namespace llvm;
55
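/// getRegisterNumbering - Given the enum value of a register, return its
/// hardware encoding (0-15 for GPR and Q registers, 0-31 for D and S
/// registers). If isSPVFP is non-null, it is set to true when the register is
/// a single-precision VFP register (S0-S31).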
56unsigned ARMBaseRegisterInfo::getRegisterNumbering(unsigned RegEnum,
57                                                   bool *isSPVFP) {
58  if (isSPVFP)
59    *isSPVFP = false;
60
61  using namespace ARM;
62  switch (RegEnum) {
63  default:
64    llvm_unreachable("Unknown ARM register!");
65  case R0:  case D0:  case Q0:  return 0;
66  case R1:  case D1:  case Q1:  return 1;
67  case R2:  case D2:  case Q2:  return 2;
68  case R3:  case D3:  case Q3:  return 3;
69  case R4:  case D4:  case Q4:  return 4;
70  case R5:  case D5:  case Q5:  return 5;
71  case R6:  case D6:  case Q6:  return 6;
72  case R7:  case D7:  case Q7:  return 7;
73  case R8:  case D8:  case Q8:  return 8;
74  case R9:  case D9:  case Q9:  return 9;
75  case R10: case D10: case Q10: return 10;
76  case R11: case D11: case Q11: return 11;
77  case R12: case D12: case Q12: return 12;
78  case SP:  case D13: case Q13: return 13;
79  case LR:  case D14: case Q14: return 14;
80  case PC:  case D15: case Q15: return 15;
81
82  case D16: return 16;
83  case D17: return 17;
84  case D18: return 18;
85  case D19: return 19;
86  case D20: return 20;
87  case D21: return 21;
88  case D22: return 22;
89  case D23: return 23;
90  case D24: return 24;
91  case D25: return 25;
92  case D26: return 26;
93  case D27: return 27;
94  case D28: return 28;
95  case D29: return 29;
96  case D30: return 30;
97  case D31: return 31;
98
99  case S0: case S1: case S2: case S3:
100  case S4: case S5: case S6: case S7:
101  case S8: case S9: case S10: case S11:
102  case S12: case S13: case S14: case S15:
103  case S16: case S17: case S18: case S19:
104  case S20: case S21: case S22: case S23:
105  case S24: case S25: case S26: case S27:
106  case S28: case S29: case S30: case S31: {
107    if (isSPVFP)
108      *isSPVFP = true;
109    switch (RegEnum) {
110    default: return 0; // Avoid compile time warning.
111    case S0: return 0;
112    case S1: return 1;
113    case S2: return 2;
114    case S3: return 3;
115    case S4: return 4;
116    case S5: return 5;
117    case S6: return 6;
118    case S7: return 7;
119    case S8: return 8;
120    case S9: return 9;
121    case S10: return 10;
122    case S11: return 11;
123    case S12: return 12;
124    case S13: return 13;
125    case S14: return 14;
126    case S15: return 15;
127    case S16: return 16;
128    case S17: return 17;
129    case S18: return 18;
130    case S19: return 19;
131    case S20: return 20;
132    case S21: return 21;
133    case S22: return 22;
134    case S23: return 23;
135    case S24: return 24;
136    case S25: return 25;
137    case S26: return 26;
138    case S27: return 27;
139    case S28: return 28;
140    case S29: return 29;
141    case S30: return 30;
142    case S31: return 31;
143    }
144  }
145  }
146}
147
148ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
149                                         const ARMSubtarget &sti)
150  : ARMGenRegisterInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
151    TII(tii), STI(sti),
152    FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11) {
153}
154
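/// getCalleeSavedRegs - Return a null-terminated list of the callee-saved
/// registers for the current subtarget: LR, R4-R11 and D8-D15 for the AAPCS,
/// while Darwin uses a different order and treats R9 as caller-saved.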
155const unsigned*
156ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
157  static const unsigned CalleeSavedRegs[] = {
158    ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
159    ARM::R7, ARM::R6,  ARM::R5,  ARM::R4,
160
161    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
162    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
163    0
164  };
165
166  static const unsigned DarwinCalleeSavedRegs[] = {
167    // The Darwin ABI deviates from the ARM standard ABI: R9 is not a
168    // callee-saved register.
169    ARM::LR,  ARM::R7,  ARM::R6, ARM::R5, ARM::R4,
170    ARM::R11, ARM::R10, ARM::R8,
171
172    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
173    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
174    0
175  };
176  return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
177}
178
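/// getReservedRegs - Mark the registers that are never allocatable: SP, PC
/// and FPSCR always, plus the frame pointer when this function needs one and
/// R9 when the subtarget reserves it.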
179BitVector ARMBaseRegisterInfo::
180getReservedRegs(const MachineFunction &MF) const {
181  // FIXME: Avoid recalculating this every time.
182  BitVector Reserved(getNumRegs());
183  Reserved.set(ARM::SP);
184  Reserved.set(ARM::PC);
185  Reserved.set(ARM::FPSCR);
186  if (hasFP(MF))
187    Reserved.set(FramePtr);
188  // Some targets reserve R9.
189  if (STI.isR9Reserved())
190    Reserved.set(ARM::R9);
191  return Reserved;
192}
193
194bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
195                                        unsigned Reg) const {
196  switch (Reg) {
197  default: break;
198  case ARM::SP:
199  case ARM::PC:
200    return true;
201  case ARM::R7:
202  case ARM::R11:
203    if (FramePtr == Reg && hasFP(MF))
204      return true;
205    break;
206  case ARM::R9:
207    return STI.isR9Reserved();
208  }
209
210  return false;
211}
212
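/// getMatchingSuperRegClass - Given a super-register class A, a sub-register
/// class B, and a sub-register index, return the register class the coalescer
/// should use for the combined value, or null to disallow the coalescing.
/// The getSize() checks below are in bytes: 8 = D, 16 = Q, 32 = QQ, 64 = QQQQ.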
213const TargetRegisterClass *
214ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
215                                              const TargetRegisterClass *B,
216                                              unsigned SubIdx) const {
217  switch (SubIdx) {
218  default: return 0;
219  case ARM::ssub_0:
220  case ARM::ssub_1:
221  case ARM::ssub_2:
222  case ARM::ssub_3: {
223    // S sub-registers.
224    if (A->getSize() == 8) {
225      if (B == &ARM::SPR_8RegClass)
226        return &ARM::DPR_8RegClass;
227      assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
228      if (A == &ARM::DPR_8RegClass)
229        return A;
230      return &ARM::DPR_VFP2RegClass;
231    }
232
233    if (A->getSize() == 16) {
234      if (B == &ARM::SPR_8RegClass)
235        return &ARM::QPR_8RegClass;
236      return &ARM::QPR_VFP2RegClass;
237    }
238
239    if (A->getSize() == 32) {
240      if (B == &ARM::SPR_8RegClass)
241        return 0;  // Do not allow coalescing!
242      return &ARM::QQPR_VFP2RegClass;
243    }
244
245    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
246    return 0;  // Do not allow coalescing!
247  }
248  case ARM::dsub_0:
249  case ARM::dsub_1:
250  case ARM::dsub_2:
251  case ARM::dsub_3: {
252    // D sub-registers.
253    if (A->getSize() == 16) {
254      if (B == &ARM::DPR_VFP2RegClass)
255        return &ARM::QPR_VFP2RegClass;
256      if (B == &ARM::DPR_8RegClass)
257        return 0;  // Do not allow coalescing!
258      return A;
259    }
260
261    if (A->getSize() == 32) {
262      if (B == &ARM::DPR_VFP2RegClass)
263        return &ARM::QQPR_VFP2RegClass;
264      if (B == &ARM::DPR_8RegClass)
265        return 0;  // Do not allow coalescing!
266      return A;
267    }
268
269    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
270    if (B != &ARM::DPRRegClass)
271      return 0;  // Do not allow coalescing!
272    return A;
273  }
274  case ARM::dsub_4:
275  case ARM::dsub_5:
276  case ARM::dsub_6:
277  case ARM::dsub_7: {
278    // D sub-registers of QQQQ registers.
279    if (A->getSize() == 64 && B == &ARM::DPRRegClass)
280      return A;
281    return 0;  // Do not allow coalescing!
282  }
283
284  case ARM::qsub_0:
285  case ARM::qsub_1: {
286    // Q sub-registers.
287    if (A->getSize() == 32) {
288      if (B == &ARM::QPR_VFP2RegClass)
289        return &ARM::QQPR_VFP2RegClass;
290      if (B == &ARM::QPR_8RegClass)
291        return 0;  // Do not allow coalescing!
292      return A;
293    }
294
295    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
296    if (B == &ARM::QPRRegClass)
297      return A;
298    return 0;  // Do not allow coalescing!
299  }
300  case ARM::qsub_2:
301  case ARM::qsub_3: {
302    // Q sub-registers of QQQQ registers.
303    if (A->getSize() == 64 && B == &ARM::QPRRegClass)
304      return A;
305    return 0;  // Do not allow coalescing!
306  }
307  }
308  return 0;
309}
310
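/// canCombineSubRegIndices - Return true if the given consecutive sub-register
/// indices of RC together form a single larger sub-register (e.g. ssub_0 and
/// ssub_1 form a D register, dsub_0-dsub_3 form a QQ register). On success,
/// NewSubIdx is the index of that combined sub-register, or 0 if the indices
/// cover the whole register.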
311bool
312ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
313                                          SmallVectorImpl<unsigned> &SubIndices,
314                                          unsigned &NewSubIdx) const {
315
316  unsigned Size = RC->getSize() * 8;
317  if (Size < 6)
318    return false;
319
320  NewSubIdx = 0;  // Whole register.
321  unsigned NumRegs = SubIndices.size();
322  if (NumRegs == 8) {
323    // 8 D registers -> 1 QQQQ register.
324    return (Size == 512 &&
325            SubIndices[0] == ARM::dsub_0 &&
326            SubIndices[1] == ARM::dsub_1 &&
327            SubIndices[2] == ARM::dsub_2 &&
328            SubIndices[3] == ARM::dsub_3 &&
329            SubIndices[4] == ARM::dsub_4 &&
330            SubIndices[5] == ARM::dsub_5 &&
331            SubIndices[6] == ARM::dsub_6 &&
332            SubIndices[7] == ARM::dsub_7);
333  } else if (NumRegs == 4) {
334    if (SubIndices[0] == ARM::qsub_0) {
335      // 4 Q registers -> 1 QQQQ register.
336      return (Size == 512 &&
337              SubIndices[1] == ARM::qsub_1 &&
338              SubIndices[2] == ARM::qsub_2 &&
339              SubIndices[3] == ARM::qsub_3);
340    } else if (SubIndices[0] == ARM::dsub_0) {
341      // 4 D registers -> 1 QQ register.
342      if (Size >= 256 &&
343          SubIndices[1] == ARM::dsub_1 &&
344          SubIndices[2] == ARM::dsub_2 &&
345          SubIndices[3] == ARM::dsub_3) {
346        if (Size == 512)
347          NewSubIdx = ARM::qqsub_0;
348        return true;
349      }
350    } else if (SubIndices[0] == ARM::dsub_4) {
351      // 4 D registers -> 1 QQ register (2nd).
352      if (Size == 512 &&
353          SubIndices[1] == ARM::dsub_5 &&
354          SubIndices[2] == ARM::dsub_6 &&
355          SubIndices[3] == ARM::dsub_7) {
356        NewSubIdx = ARM::qqsub_1;
357        return true;
358      }
359    } else if (SubIndices[0] == ARM::ssub_0) {
360      // 4 S registers -> 1 Q register.
361      if (Size >= 128 &&
362          SubIndices[1] == ARM::ssub_1 &&
363          SubIndices[2] == ARM::ssub_2 &&
364          SubIndices[3] == ARM::ssub_3) {
365        if (Size >= 256)
366          NewSubIdx = ARM::qsub_0;
367        return true;
368      }
369    }
370  } else if (NumRegs == 2) {
371    if (SubIndices[0] == ARM::qsub_0) {
372      // 2 Q registers -> 1 QQ register.
373      if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
374        if (Size == 512)
375          NewSubIdx = ARM::qqsub_0;
376        return true;
377      }
378    } else if (SubIndices[0] == ARM::qsub_2) {
379      // 2 Q registers -> 1 QQ register (2nd).
380      if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
381        NewSubIdx = ARM::qqsub_1;
382        return true;
383      }
384    } else if (SubIndices[0] == ARM::dsub_0) {
385      // 2 D registers -> 1 Q register.
386      if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
387        if (Size >= 256)
388          NewSubIdx = ARM::qsub_0;
389        return true;
390      }
391    } else if (SubIndices[0] == ARM::dsub_2) {
392      // 2 D registers -> 1 Q register (2nd).
393      if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
394        NewSubIdx = ARM::qsub_1;
395        return true;
396      }
397    } else if (SubIndices[0] == ARM::dsub_4) {
398      // 2 D registers -> 1 Q register (3rd).
399      if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
400        NewSubIdx = ARM::qsub_2;
401        return true;
402      }
403    } else if (SubIndices[0] == ARM::dsub_6) {
404      // 2 D registers -> 1 Q register (3rd).
405      if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
406        NewSubIdx = ARM::qsub_3;
407        return true;
408      }
409    } else if (SubIndices[0] == ARM::ssub_0) {
410      // 2 S registers -> 1 D register.
411      if (SubIndices[1] == ARM::ssub_1) {
412        if (Size >= 128)
413          NewSubIdx = ARM::dsub_0;
414        return true;
415      }
416    } else if (SubIndices[0] == ARM::ssub_2) {
417      // 2 S registers -> 1 D register (2nd).
418      if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
419        NewSubIdx = ARM::dsub_1;
420        return true;
421      }
422    }
423  }
424  return false;
425}
426
427
428const TargetRegisterClass *
429ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
430  return ARM::GPRRegisterClass;
431}
432
433/// getAllocationOrder - Returns the register allocation order for a specified
434/// register class in the form of a pair of TargetRegisterClass iterators.
435std::pair<TargetRegisterClass::iterator,TargetRegisterClass::iterator>
436ARMBaseRegisterInfo::getAllocationOrder(const TargetRegisterClass *RC,
437                                        unsigned HintType, unsigned HintReg,
438                                        const MachineFunction &MF) const {
439  // Alternative register allocation orders when favoring even / odd registers
440  // of register pairs.
441
442  // No FP, R9 is available.
443  static const unsigned GPREven1[] = {
444    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
445    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
446    ARM::R9, ARM::R11
447  };
448  static const unsigned GPROdd1[] = {
449    ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
450    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
451    ARM::R8, ARM::R10
452  };
453
454  // FP is R7, R9 is available.
455  static const unsigned GPREven2[] = {
456    ARM::R0, ARM::R2, ARM::R4,          ARM::R8, ARM::R10,
457    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
458    ARM::R9, ARM::R11
459  };
460  static const unsigned GPROdd2[] = {
461    ARM::R1, ARM::R3, ARM::R5,          ARM::R9, ARM::R11,
462    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
463    ARM::R8, ARM::R10
464  };
465
466  // FP is R11, R9 is available.
467  static const unsigned GPREven3[] = {
468    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
469    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
470    ARM::R9
471  };
472  static const unsigned GPROdd3[] = {
473    ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
474    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
475    ARM::R8
476  };
477
478  // No FP, R9 is not available.
479  static const unsigned GPREven4[] = {
480    ARM::R0, ARM::R2, ARM::R4, ARM::R6,          ARM::R10,
481    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
482    ARM::R11
483  };
484  static const unsigned GPROdd4[] = {
485    ARM::R1, ARM::R3, ARM::R5, ARM::R7,          ARM::R11,
486    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
487    ARM::R10
488  };
489
490  // FP is R7, R9 is not available.
491  static const unsigned GPREven5[] = {
492    ARM::R0, ARM::R2, ARM::R4,                   ARM::R10,
493    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
494    ARM::R11
495  };
496  static const unsigned GPROdd5[] = {
497    ARM::R1, ARM::R3, ARM::R5,                   ARM::R11,
498    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
499    ARM::R10
500  };
501
502  // FP is R11, R9 is not available.
503  static const unsigned GPREven6[] = {
504    ARM::R0, ARM::R2, ARM::R4, ARM::R6,
505    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
506  };
507  static const unsigned GPROdd6[] = {
508    ARM::R1, ARM::R3, ARM::R5, ARM::R7,
509    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
510  };
511
512
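  // When a virtual register is hinted RegPairEven (or RegPairOdd), the orders
  // above try the even (or odd) halves of the allocatable register pairs first
  // and only then fall back to the remaining GPRs, presumably so that the
  // paired partner register is still likely to be free.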
513  if (HintType == ARMRI::RegPairEven) {
514    if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
515      // It's no longer possible to fulfill this hint. Return the default
516      // allocation order.
517      return std::make_pair(RC->allocation_order_begin(MF),
518                            RC->allocation_order_end(MF));
519
520    if (!hasFP(MF)) {
521      if (!STI.isR9Reserved())
522        return std::make_pair(GPREven1,
523                              GPREven1 + (sizeof(GPREven1)/sizeof(unsigned)));
524      else
525        return std::make_pair(GPREven4,
526                              GPREven4 + (sizeof(GPREven4)/sizeof(unsigned)));
527    } else if (FramePtr == ARM::R7) {
528      if (!STI.isR9Reserved())
529        return std::make_pair(GPREven2,
530                              GPREven2 + (sizeof(GPREven2)/sizeof(unsigned)));
531      else
532        return std::make_pair(GPREven5,
533                              GPREven5 + (sizeof(GPREven5)/sizeof(unsigned)));
534    } else { // FramePtr == ARM::R11
535      if (!STI.isR9Reserved())
536        return std::make_pair(GPREven3,
537                              GPREven3 + (sizeof(GPREven3)/sizeof(unsigned)));
538      else
539        return std::make_pair(GPREven6,
540                              GPREven6 + (sizeof(GPREven6)/sizeof(unsigned)));
541    }
542  } else if (HintType == ARMRI::RegPairOdd) {
543    if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
544      // It's no longer possible to fulfill this hint. Return the default
545      // allocation order.
546      return std::make_pair(RC->allocation_order_begin(MF),
547                            RC->allocation_order_end(MF));
548
549    if (!hasFP(MF)) {
550      if (!STI.isR9Reserved())
551        return std::make_pair(GPROdd1,
552                              GPROdd1 + (sizeof(GPROdd1)/sizeof(unsigned)));
553      else
554        return std::make_pair(GPROdd4,
555                              GPROdd4 + (sizeof(GPROdd4)/sizeof(unsigned)));
556    } else if (FramePtr == ARM::R7) {
557      if (!STI.isR9Reserved())
558        return std::make_pair(GPROdd2,
559                              GPROdd2 + (sizeof(GPROdd2)/sizeof(unsigned)));
560      else
561        return std::make_pair(GPROdd5,
562                              GPROdd5 + (sizeof(GPROdd5)/sizeof(unsigned)));
563    } else { // FramePtr == ARM::R11
564      if (!STI.isR9Reserved())
565        return std::make_pair(GPROdd3,
566                              GPROdd3 + (sizeof(GPROdd3)/sizeof(unsigned)));
567      else
568        return std::make_pair(GPROdd6,
569                              GPROdd6 + (sizeof(GPROdd6)/sizeof(unsigned)));
570    }
571  }
572  return std::make_pair(RC->allocation_order_begin(MF),
573                        RC->allocation_order_end(MF));
574}
575
576/// ResolveRegAllocHint - Resolves the specified register allocation hint
577/// to a physical register. Returns the physical register on success, or 0.
578unsigned
579ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
580                                         const MachineFunction &MF) const {
581  if (Reg == 0 || !isPhysicalRegister(Reg))
582    return 0;
583  if (Type == 0)
584    return Reg;
585  else if (Type == (unsigned)ARMRI::RegPairOdd)
586    // Odd register.
587    return getRegisterPairOdd(Reg, MF);
588  else if (Type == (unsigned)ARMRI::RegPairEven)
589    // Even register.
590    return getRegisterPairEven(Reg, MF);
591  return 0;
592}
593
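/// UpdateRegAllocHint - Keep even/odd pair hints consistent when a register
/// changes. For example, if virtual registers A and B hint each other as an
/// even/odd pair and A is coalesced into C, UpdateRegAllocHint(A, C, MF)
/// rewrites B's hint so that it points at C and the pairing is preserved.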
594void
595ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
596                                        MachineFunction &MF) const {
597  MachineRegisterInfo *MRI = &MF.getRegInfo();
598  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
599  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
600       Hint.first == (unsigned)ARMRI::RegPairEven) &&
601      Hint.second && TargetRegisterInfo::isVirtualRegister(Hint.second)) {
602    // If 'Reg' is one of the even / odd register pair and it's now changed
603    // (e.g. coalesced) into a different register. The other register of the
604    // pair allocation hint must be updated to reflect the relationship
605    // change.
606    unsigned OtherReg = Hint.second;
607    Hint = MRI->getRegAllocationHint(OtherReg);
608    if (Hint.second == Reg)
609      // Make sure the pair has not already divorced.
610      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
611  }
612}
613
614/// hasFP - Return true if the specified function should have a dedicated frame
615/// pointer register.  This is true if the function has variable sized allocas
616/// or if frame pointer elimination is disabled.
617///
618bool ARMBaseRegisterInfo::hasFP(const MachineFunction &MF) const {
619  // Mac OS X requires FP not to be clobbered for backtracing purposes.
620  if (STI.isTargetDarwin())
621    return true;
622
623  const MachineFrameInfo *MFI = MF.getFrameInfo();
624  // Always eliminate non-leaf frame pointers.
625  return ((DisableFramePointerElim(MF) && MFI->hasCalls()) ||
626          needsStackRealignment(MF) ||
627          MFI->hasVarSizedObjects() ||
628          MFI->isFrameAddressTaken());
629}
630
631bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
632  const MachineFrameInfo *MFI = MF.getFrameInfo();
633  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
634  return (RealignStack &&
635          !AFI->isThumb1OnlyFunction() &&
636          !MFI->hasVarSizedObjects());
637}
638
639bool ARMBaseRegisterInfo::
640needsStackRealignment(const MachineFunction &MF) const {
641  const MachineFrameInfo *MFI = MF.getFrameInfo();
642  const Function *F = MF.getFunction();
643  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
644  unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
645  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
646                               F->hasFnAttr(Attribute::StackAlignment));
647
648  // FIXME: Currently we don't support stack realignment for functions with
649  //        variable-sized allocas.
650  // FIXME: It's more complicated than this...
651  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
652    report_fatal_error(
653      "Stack realignment in presense of dynamic allocas is not supported");
654
655  // FIXME: This probably isn't the right place for this.
656  if (0 && requiresRealignment && AFI->isThumb1OnlyFunction())
657    report_fatal_error(
658      "Stack realignment in thumb1 functions is not supported");
659
660  return requiresRealignment && canRealignStack(MF);
661}
662
663bool ARMBaseRegisterInfo::
664cannotEliminateFrame(const MachineFunction &MF) const {
665  const MachineFrameInfo *MFI = MF.getFrameInfo();
666  if (DisableFramePointerElim(MF) && MFI->adjustsStack())
667    return true;
668  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
669    || needsStackRealignment(MF);
670}
671
672/// estimateStackSize - Estimate and return the size of the frame.
673static unsigned estimateStackSize(MachineFunction &MF) {
674  const MachineFrameInfo *FFI = MF.getFrameInfo();
675  int Offset = 0;
676  for (int i = FFI->getObjectIndexBegin(); i != 0; ++i) {
677    int FixedOff = -FFI->getObjectOffset(i);
678    if (FixedOff > Offset) Offset = FixedOff;
679  }
680  for (unsigned i = 0, e = FFI->getObjectIndexEnd(); i != e; ++i) {
681    if (FFI->isDeadObjectIndex(i))
682      continue;
683    Offset += FFI->getObjectSize(i);
684    unsigned Align = FFI->getObjectAlignment(i);
685    // Adjust to alignment boundary
686    Offset = (Offset+Align-1)/Align*Align;
687  }
688  return (unsigned)Offset;
689}
690
691/// estimateRSStackSizeLimit - Look at each instruction that references stack
692/// frames and return the stack size limit beyond which some of these
693/// instructions will require a scratch register during their expansion later.
694unsigned
695ARMBaseRegisterInfo::estimateRSStackSizeLimit(MachineFunction &MF) const {
696  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
697  unsigned Limit = (1 << 12) - 1;
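  // Start with the largest immediate the 12-bit addressing modes can encode
  // (4095 bytes) and shrink the limit as more restrictive modes are seen below.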
698  for (MachineFunction::iterator BB = MF.begin(),E = MF.end(); BB != E; ++BB) {
699    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
700         I != E; ++I) {
701      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
702        if (!I->getOperand(i).isFI()) continue;
703
704        // When using ADDri to get the address of a stack object, 255 is the
705        // largest offset guaranteed to fit in the immediate offset.
706        if (I->getOpcode() == ARM::ADDri) {
707          Limit = std::min(Limit, (1U << 8) - 1);
708          break;
709        }
710
711        // Otherwise check the addressing mode.
712        switch (I->getDesc().TSFlags & ARMII::AddrModeMask) {
713        case ARMII::AddrMode3:
714        case ARMII::AddrModeT2_i8:
715          Limit = std::min(Limit, (1U << 8) - 1);
716          break;
717        case ARMII::AddrMode5:
718        case ARMII::AddrModeT2_i8s4:
719          Limit = std::min(Limit, ((1U << 8) - 1) * 4);
720          break;
721        case ARMII::AddrModeT2_i12:
722          // i12 supports only positive offsets, so these will be converted
723          // to i8 opcodes. See llvm::rewriteT2FrameIndex.
724          if (hasFP(MF) && AFI->hasStackFrame())
725            Limit = std::min(Limit, (1U << 8) - 1);
726          break;
727        case ARMII::AddrMode6:
728          // Addressing mode 6 (load/store) instructions can't encode an
729          // immediate offset for stack references.
730          return 0;
731        default:
732          break;
733        }
734        break; // At most one FI per instruction
735      }
736    }
737  }
738
739  return Limit;
740}
741
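/// GetFunctionSizeInBytes - Sum the encoded size of every instruction in MF;
/// used below to decide whether a Thumb1 function may need LR spilled to
/// implement a far jump.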
742static unsigned GetFunctionSizeInBytes(const MachineFunction &MF,
743                                       const ARMBaseInstrInfo &TII) {
744  unsigned FnSize = 0;
745  for (MachineFunction::const_iterator MBBI = MF.begin(), E = MF.end();
746       MBBI != E; ++MBBI) {
747    const MachineBasicBlock &MBB = *MBBI;
748    for (MachineBasicBlock::const_iterator I = MBB.begin(),E = MBB.end();
749         I != E; ++I)
750      FnSize += TII.GetInstSizeInBytes(I);
751  }
752  return FnSize;
753}
754
755void
756ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
757                                                       RegScavenger *RS) const {
758  // This tells PEI to spill the FP as if it were any other callee-saved
759  // register, to take advantage of the eliminateFrameIndex machinery. This
760  // also ensures it is spilled in the order specified by getCalleeSavedRegs()
761  // to make it easier to combine multiple loads / stores.
762  bool CanEliminateFrame = true;
763  bool CS1Spilled = false;
764  bool LRSpilled = false;
765  unsigned NumGPRSpills = 0;
766  SmallVector<unsigned, 4> UnspilledCS1GPRs;
767  SmallVector<unsigned, 4> UnspilledCS2GPRs;
768  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
769  MachineFrameInfo *MFI = MF.getFrameInfo();
770
771  // Spill R4 if a Thumb2 function requires stack realignment; it will be
772  // used as a scratch register.
773  // FIXME: It would be better just to find a spare register here.
774  if (needsStackRealignment(MF) &&
775      AFI->isThumb2Function())
776    MF.getRegInfo().setPhysRegUsed(ARM::R4);
777
778  // Spill LR if Thumb1 function uses variable length argument lists.
779  if (AFI->isThumb1OnlyFunction() && AFI->getVarArgsRegSaveSize() > 0)
780    MF.getRegInfo().setPhysRegUsed(ARM::LR);
781
782  // Don't spill FP if the frame can be eliminated. This is determined
783  // by scanning the callee-saved registers to see whether any is used.
784  const unsigned *CSRegs = getCalleeSavedRegs();
785  for (unsigned i = 0; CSRegs[i]; ++i) {
786    unsigned Reg = CSRegs[i];
787    bool Spilled = false;
788    if (MF.getRegInfo().isPhysRegUsed(Reg)) {
789      AFI->setCSRegisterIsSpilled(Reg);
790      Spilled = true;
791      CanEliminateFrame = false;
792    } else {
793      // Check alias registers too.
794      for (const unsigned *Aliases = getAliasSet(Reg); *Aliases; ++Aliases) {
795        if (MF.getRegInfo().isPhysRegUsed(*Aliases)) {
796          Spilled = true;
797          CanEliminateFrame = false;
798        }
799      }
800    }
801
802    if (!ARM::GPRRegisterClass->contains(Reg))
803      continue;
804
805    if (Spilled) {
806      NumGPRSpills++;
807
808      if (!STI.isTargetDarwin()) {
809        if (Reg == ARM::LR)
810          LRSpilled = true;
811        CS1Spilled = true;
812        continue;
813      }
814
815      // Keep track of whether LR and any of R4, R5, R6, and R7 are spilled.
816      switch (Reg) {
817      case ARM::LR:
818        LRSpilled = true;
819        // Fallthrough
820      case ARM::R4:
821      case ARM::R5:
822      case ARM::R6:
823      case ARM::R7:
824        CS1Spilled = true;
825        break;
826      default:
827        break;
828      }
829    } else {
830      if (!STI.isTargetDarwin()) {
831        UnspilledCS1GPRs.push_back(Reg);
832        continue;
833      }
834
835      switch (Reg) {
836      case ARM::R4:
837      case ARM::R5:
838      case ARM::R6:
839      case ARM::R7:
840      case ARM::LR:
841        UnspilledCS1GPRs.push_back(Reg);
842        break;
843      default:
844        UnspilledCS2GPRs.push_back(Reg);
845        break;
846      }
847    }
848  }
849
850  bool ForceLRSpill = false;
851  if (!LRSpilled && AFI->isThumb1OnlyFunction()) {
852    unsigned FnSize = GetFunctionSizeInBytes(MF, TII);
853    // Force LR to be spilled if the Thumb function size is 2048 bytes or
854    // more. This enables the use of BL to implement a far jump. If it turns
855    // out that it's not needed, then the branch fixup path will undo it.
856    if (FnSize >= (1 << 11)) {
857      CanEliminateFrame = false;
858      ForceLRSpill = true;
859    }
860  }
861
862  // If any of the stack slot references may be out of range of an immediate
863  // offset, make sure a register (or a spill slot) is available for the
864  // register scavenger. Note that if we're indexing off the frame pointer, the
865  // effective stack size is 4 bytes larger since the FP points to the stack
866  // slot of the previous FP. Also, if we have variable sized objects in the
867  // function, stack slot references will often be negative, and some of
868  // our instructions are positive-offset only, so conservatively consider
869  // that case to want a spill slot (or register) as well. Similarly, if
870  // the function adjusts the stack pointer during execution and the
871  // adjustments aren't already part of our stack size estimate, our offset
872  // calculations may be off, so be conservative.
873  // FIXME: We could add logic to be more precise about negative offsets
874  //        and which instructions will need a scratch register for them. Is it
875  //        worth the effort and added fragility?
876  bool BigStack =
877    (RS &&
878     (estimateStackSize(MF) + ((hasFP(MF) && AFI->hasStackFrame()) ? 4:0) >=
879      estimateRSStackSizeLimit(MF)))
880    || MFI->hasVarSizedObjects()
881    || (MFI->adjustsStack() && !canSimplifyCallFramePseudos(MF));
882
883  bool ExtraCSSpill = false;
884  if (BigStack || !CanEliminateFrame || cannotEliminateFrame(MF)) {
885    AFI->setHasStackFrame(true);
886
887    // If LR is not spilled, but at least one of R4, R5, R6, and R7 is,
888    // spill LR as well so we can fold BX_RET into the register restore (LDM).
889    if (!LRSpilled && CS1Spilled) {
890      MF.getRegInfo().setPhysRegUsed(ARM::LR);
891      AFI->setCSRegisterIsSpilled(ARM::LR);
892      NumGPRSpills++;
893      UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(),
894                                    UnspilledCS1GPRs.end(), (unsigned)ARM::LR));
895      ForceLRSpill = false;
896      ExtraCSSpill = true;
897    }
898
899    if (hasFP(MF)) {
900      MF.getRegInfo().setPhysRegUsed(FramePtr);
901      NumGPRSpills++;
902    }
903
904    // If the stack and doubles are 8-byte aligned and we are spilling an odd
905    // number of GPRs, spill one extra callee-saved GPR so we won't have to
906    // pad between the integer and double callee-save areas.
907    unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
908    if (TargetAlign == 8 && (NumGPRSpills & 1)) {
909      if (CS1Spilled && !UnspilledCS1GPRs.empty()) {
910        for (unsigned i = 0, e = UnspilledCS1GPRs.size(); i != e; ++i) {
911          unsigned Reg = UnspilledCS1GPRs[i];
912          // Don't spill a high register if the function is Thumb1.
913          if (!AFI->isThumb1OnlyFunction() ||
914              isARMLowRegister(Reg) || Reg == ARM::LR) {
915            MF.getRegInfo().setPhysRegUsed(Reg);
916            AFI->setCSRegisterIsSpilled(Reg);
917            if (!isReservedReg(MF, Reg))
918              ExtraCSSpill = true;
919            break;
920          }
921        }
922      } else if (!UnspilledCS2GPRs.empty() &&
923                 !AFI->isThumb1OnlyFunction()) {
924        unsigned Reg = UnspilledCS2GPRs.front();
925        MF.getRegInfo().setPhysRegUsed(Reg);
926        AFI->setCSRegisterIsSpilled(Reg);
927        if (!isReservedReg(MF, Reg))
928          ExtraCSSpill = true;
929      }
930    }
931
932    // Estimate if we might need to scavenge a register at some point in order
933    // to materialize a stack offset. If so, either spill one additional
934    // callee-saved register or reserve a special spill slot to facilitate
935    // register scavenging. Thumb1 needs a spill slot for stack pointer
936    // adjustments also, even when the frame itself is small.
937    if (BigStack && !ExtraCSSpill) {
938      // If any non-reserved CS register isn't spilled, just spill one or two
939      // extra. That should take care of it!
940      unsigned NumExtras = TargetAlign / 4;
941      SmallVector<unsigned, 2> Extras;
942      while (NumExtras && !UnspilledCS1GPRs.empty()) {
943        unsigned Reg = UnspilledCS1GPRs.back();
944        UnspilledCS1GPRs.pop_back();
945        if (!isReservedReg(MF, Reg) &&
946            (!AFI->isThumb1OnlyFunction() || isARMLowRegister(Reg) ||
947             Reg == ARM::LR)) {
948          Extras.push_back(Reg);
949          NumExtras--;
950        }
951      }
952      // For non-Thumb1 functions, also check for hi-reg CS registers
953      if (!AFI->isThumb1OnlyFunction()) {
954        while (NumExtras && !UnspilledCS2GPRs.empty()) {
955          unsigned Reg = UnspilledCS2GPRs.back();
956          UnspilledCS2GPRs.pop_back();
957          if (!isReservedReg(MF, Reg)) {
958            Extras.push_back(Reg);
959            NumExtras--;
960          }
961        }
962      }
963      if (Extras.size() && NumExtras == 0) {
964        for (unsigned i = 0, e = Extras.size(); i != e; ++i) {
965          MF.getRegInfo().setPhysRegUsed(Extras[i]);
966          AFI->setCSRegisterIsSpilled(Extras[i]);
967        }
968      } else if (!AFI->isThumb1OnlyFunction()) {
969        // Note: Thumb1 functions spill to R12, not the stack. Reserve a slot
970        // closest to SP or the frame pointer.
971        const TargetRegisterClass *RC = ARM::GPRRegisterClass;
972        RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
973                                                           RC->getAlignment(),
974                                                           false));
975      }
976    }
977  }
978
979  if (ForceLRSpill) {
980    MF.getRegInfo().setPhysRegUsed(ARM::LR);
981    AFI->setCSRegisterIsSpilled(ARM::LR);
982    AFI->setLRIsSpilledForFarJump(true);
983  }
984}
985
986unsigned ARMBaseRegisterInfo::getRARegister() const {
987  return ARM::LR;
988}
989
990unsigned
991ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
992  if (hasFP(MF))
993    return FramePtr;
994  return ARM::SP;
995}
996
997// Provide a base+offset reference to an FI slot for debug info. It's the
998// same as what we use for resolving the code-gen references for now.
999// FIXME: This can go wrong when references are SP-relative and simple call
1000//        frames aren't used.
1001int
1002ARMBaseRegisterInfo::getFrameIndexReference(const MachineFunction &MF, int FI,
1003                                            unsigned &FrameReg) const {
1004  return ResolveFrameIndexReference(MF, FI, FrameReg, 0);
1005}
1006
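/// ResolveFrameIndexReference - Compute the offset of frame index FI from the
/// register that should be used to address it, returning that register in
/// FrameReg (SP or the frame pointer). SPAdj is any extra SP adjustment in
/// effect at the point of use, e.g. inside a call sequence when call frames
/// are not reserved.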
1007int
1008ARMBaseRegisterInfo::ResolveFrameIndexReference(const MachineFunction &MF,
1009                                                int FI,
1010                                                unsigned &FrameReg,
1011                                                int SPAdj) const {
1012  const MachineFrameInfo *MFI = MF.getFrameInfo();
1013  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1014  int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
1015  int FPOffset = Offset - AFI->getFramePtrSpillOffset();
1016  bool isFixed = MFI->isFixedObjectIndex(FI);
1017
1018  FrameReg = ARM::SP;
1019  Offset += SPAdj;
1020  if (AFI->isGPRCalleeSavedArea1Frame(FI))
1021    return Offset - AFI->getGPRCalleeSavedArea1Offset();
1022  else if (AFI->isGPRCalleeSavedArea2Frame(FI))
1023    return Offset - AFI->getGPRCalleeSavedArea2Offset();
1024  else if (AFI->isDPRCalleeSavedAreaFrame(FI))
1025    return Offset - AFI->getDPRCalleeSavedAreaOffset();
1026
1027  // When dynamically realigning the stack, use the frame pointer for
1028  // parameters, and the stack pointer for locals.
1029  if (needsStackRealignment(MF)) {
1030    assert (hasFP(MF) && "dynamic stack realignment without a FP!");
1031    if (isFixed) {
1032      FrameReg = getFrameRegister(MF);
1033      Offset = FPOffset;
1034    }
1035    return Offset;
1036  }
1037
1038  // If there is a frame pointer, use it when we can.
1039  if (hasFP(MF) && AFI->hasStackFrame()) {
1040    // Use frame pointer to reference fixed objects. Use it for locals if
1041    // there are VLAs (and thus the SP isn't reliable as a base).
1042    if (isFixed || MFI->hasVarSizedObjects()) {
1043      FrameReg = getFrameRegister(MF);
1044      Offset = FPOffset;
1045    } else if (AFI->isThumb2Function()) {
1046      // In Thumb2 mode, the negative offset is very limited. Try to avoid
1047      // out of range references.
1048      if (FPOffset >= -255 && FPOffset < 0) {
1049        FrameReg = getFrameRegister(MF);
1050        Offset = FPOffset;
1051      }
1052    } else if (Offset > (FPOffset < 0 ? -FPOffset : FPOffset)) {
1053      // Otherwise, use SP or FP, whichever is closer to the stack slot.
1054      FrameReg = getFrameRegister(MF);
1055      Offset = FPOffset;
1056    }
1057  }
1058  return Offset;
1059}
1060
1061int
1062ARMBaseRegisterInfo::getFrameIndexOffset(const MachineFunction &MF,
1063                                         int FI) const {
1064  unsigned FrameReg;
1065  return getFrameIndexReference(MF, FI, FrameReg);
1066}
1067
1068unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
1069  llvm_unreachable("What is the exception register");
1070  return 0;
1071}
1072
1073unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
1074  llvm_unreachable("What is the exception handler register");
1075  return 0;
1076}
1077
1078int ARMBaseRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
1079  return ARMGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
1080}
1081
1082unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
1083                                              const MachineFunction &MF) const {
1084  switch (Reg) {
1085  default: break;
1086  // Return 0 if either register of the pair is a special register.
1087  // So no R12, etc.
1088  case ARM::R1:
1089    return ARM::R0;
1090  case ARM::R3:
1091    return ARM::R2;
1092  case ARM::R5:
1093    return ARM::R4;
1094  case ARM::R7:
1095    return isReservedReg(MF, ARM::R7)  ? 0 : ARM::R6;
1096  case ARM::R9:
1097    return isReservedReg(MF, ARM::R9)  ? 0 : ARM::R8;
1098  case ARM::R11:
1099    return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;
1100
1101  case ARM::S1:
1102    return ARM::S0;
1103  case ARM::S3:
1104    return ARM::S2;
1105  case ARM::S5:
1106    return ARM::S4;
1107  case ARM::S7:
1108    return ARM::S6;
1109  case ARM::S9:
1110    return ARM::S8;
1111  case ARM::S11:
1112    return ARM::S10;
1113  case ARM::S13:
1114    return ARM::S12;
1115  case ARM::S15:
1116    return ARM::S14;
1117  case ARM::S17:
1118    return ARM::S16;
1119  case ARM::S19:
1120    return ARM::S18;
1121  case ARM::S21:
1122    return ARM::S20;
1123  case ARM::S23:
1124    return ARM::S22;
1125  case ARM::S25:
1126    return ARM::S24;
1127  case ARM::S27:
1128    return ARM::S26;
1129  case ARM::S29:
1130    return ARM::S28;
1131  case ARM::S31:
1132    return ARM::S30;
1133
1134  case ARM::D1:
1135    return ARM::D0;
1136  case ARM::D3:
1137    return ARM::D2;
1138  case ARM::D5:
1139    return ARM::D4;
1140  case ARM::D7:
1141    return ARM::D6;
1142  case ARM::D9:
1143    return ARM::D8;
1144  case ARM::D11:
1145    return ARM::D10;
1146  case ARM::D13:
1147    return ARM::D12;
1148  case ARM::D15:
1149    return ARM::D14;
1150  case ARM::D17:
1151    return ARM::D16;
1152  case ARM::D19:
1153    return ARM::D18;
1154  case ARM::D21:
1155    return ARM::D20;
1156  case ARM::D23:
1157    return ARM::D22;
1158  case ARM::D25:
1159    return ARM::D24;
1160  case ARM::D27:
1161    return ARM::D26;
1162  case ARM::D29:
1163    return ARM::D28;
1164  case ARM::D31:
1165    return ARM::D30;
1166  }
1167
1168  return 0;
1169}
1170
1171unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
1172                                             const MachineFunction &MF) const {
1173  switch (Reg) {
1174  default: break;
1175  // Return 0 if either register of the pair is a special register.
1176  // So no R12, etc.
1177  case ARM::R0:
1178    return ARM::R1;
1179  case ARM::R2:
1180    return ARM::R3;
1181  case ARM::R4:
1182    return ARM::R5;
1183  case ARM::R6:
1184    return isReservedReg(MF, ARM::R7)  ? 0 : ARM::R7;
1185  case ARM::R8:
1186    return isReservedReg(MF, ARM::R9)  ? 0 : ARM::R9;
1187  case ARM::R10:
1188    return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;
1189
1190  case ARM::S0:
1191    return ARM::S1;
1192  case ARM::S2:
1193    return ARM::S3;
1194  case ARM::S4:
1195    return ARM::S5;
1196  case ARM::S6:
1197    return ARM::S7;
1198  case ARM::S8:
1199    return ARM::S9;
1200  case ARM::S10:
1201    return ARM::S11;
1202  case ARM::S12:
1203    return ARM::S13;
1204  case ARM::S14:
1205    return ARM::S15;
1206  case ARM::S16:
1207    return ARM::S17;
1208  case ARM::S18:
1209    return ARM::S19;
1210  case ARM::S20:
1211    return ARM::S21;
1212  case ARM::S22:
1213    return ARM::S23;
1214  case ARM::S24:
1215    return ARM::S25;
1216  case ARM::S26:
1217    return ARM::S27;
1218  case ARM::S28:
1219    return ARM::S29;
1220  case ARM::S30:
1221    return ARM::S31;
1222
1223  case ARM::D0:
1224    return ARM::D1;
1225  case ARM::D2:
1226    return ARM::D3;
1227  case ARM::D4:
1228    return ARM::D5;
1229  case ARM::D6:
1230    return ARM::D7;
1231  case ARM::D8:
1232    return ARM::D9;
1233  case ARM::D10:
1234    return ARM::D11;
1235  case ARM::D12:
1236    return ARM::D13;
1237  case ARM::D14:
1238    return ARM::D15;
1239  case ARM::D16:
1240    return ARM::D17;
1241  case ARM::D18:
1242    return ARM::D19;
1243  case ARM::D20:
1244    return ARM::D21;
1245  case ARM::D22:
1246    return ARM::D23;
1247  case ARM::D24:
1248    return ARM::D25;
1249  case ARM::D26:
1250    return ARM::D27;
1251  case ARM::D28:
1252    return ARM::D29;
1253  case ARM::D30:
1254    return ARM::D31;
1255  }
1256
1257  return 0;
1258}
1259
1260/// emitLoadConstPool - Emits a load from the constant pool to materialize
1261/// the specified immediate.
1262void ARMBaseRegisterInfo::
1263emitLoadConstPool(MachineBasicBlock &MBB,
1264                  MachineBasicBlock::iterator &MBBI,
1265                  DebugLoc dl,
1266                  unsigned DestReg, unsigned SubIdx, int Val,
1267                  ARMCC::CondCodes Pred,
1268                  unsigned PredReg) const {
1269  MachineFunction &MF = *MBB.getParent();
1270  MachineConstantPool *ConstantPool = MF.getConstantPool();
1271  const Constant *C =
1272        ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
1273  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
1274
1275  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
1276    .addReg(DestReg, getDefRegState(true), SubIdx)
1277    .addConstantPoolIndex(Idx)
1278    .addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
1279}
1280
1281bool ARMBaseRegisterInfo::
1282requiresRegisterScavenging(const MachineFunction &MF) const {
1283  return true;
1284}
1285
1286bool ARMBaseRegisterInfo::
1287requiresFrameIndexScavenging(const MachineFunction &MF) const {
1288  return true;
1289}
1290
1291bool ARMBaseRegisterInfo::
1292requiresVirtualBaseRegisters(const MachineFunction &MF) const {
1293  return EnableLocalStackAlloc;
1294}
1295
1296// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
1297// not required, we reserve argument space for call sites immediately on entry
1298// to the current function. This eliminates the need for add/sub sp brackets
1299// around call sites. Returns true if the call frame is included as part of
1300// the stack frame.
1301bool ARMBaseRegisterInfo::
1302hasReservedCallFrame(const MachineFunction &MF) const {
1303  const MachineFrameInfo *FFI = MF.getFrameInfo();
1304  unsigned CFSize = FFI->getMaxCallFrameSize();
1305  // It's not always a good idea to include the call frame as part of the
1306  // stack frame. ARM (especially Thumb) has only small immediate offsets for
1307  // addressing the stack frame, so a large call frame can cause poor codegen
1308  // and may even make it impossible to scavenge a register.
1309  if (CFSize >= ((1 << 12) - 1) / 2)  // Half of imm12
1310    return false;
1311
1312  return !MF.getFrameInfo()->hasVarSizedObjects();
1313}
1314
1315// canSimplifyCallFramePseudos - If there is a reserved call frame, the
1316// call frame pseudos can be simplified. Unlike most targets, having a FP
1317// is not sufficient here since we still may reference some objects via SP
1318// even when FP is available in Thumb2 mode.
1319bool ARMBaseRegisterInfo::
1320canSimplifyCallFramePseudos(const MachineFunction &MF) const {
1321  return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
1322}
1323
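/// emitSPUpdate - Adjust SP by NumBytes (negative values subtract), expanding
/// to the appropriate ARM or Thumb2 register-plus-immediate sequence; e.g.
/// emitSPUpdate(isARM, MBB, MBBI, dl, TII, -16) emits roughly 'sub sp, sp, #16'.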
1324static void
1325emitSPUpdate(bool isARM,
1326             MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
1327             DebugLoc dl, const ARMBaseInstrInfo &TII,
1328             int NumBytes,
1329             ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
1330  if (isARM)
1331    emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
1332                            Pred, PredReg, TII);
1333  else
1334    emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
1335                           Pred, PredReg, TII);
1336}
1337
1338
1339void ARMBaseRegisterInfo::
1340eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
1341                              MachineBasicBlock::iterator I) const {
1342  if (!hasReservedCallFrame(MF)) {
1343    // If we have an alloca, convert as follows:
1344    // ADJCALLSTACKDOWN -> sub sp, sp, amount
1345    // ADJCALLSTACKUP   -> add sp, sp, amount
1346    MachineInstr *Old = I;
1347    DebugLoc dl = Old->getDebugLoc();
1348    unsigned Amount = Old->getOperand(0).getImm();
1349    if (Amount != 0) {
1350      // We need to keep the stack aligned properly.  To do this, we round the
1351      // amount of space needed for the outgoing arguments up to the next
1352      // alignment boundary.
1353      unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
1354      Amount = (Amount+Align-1)/Align*Align;
1355
1356      ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1357      assert(!AFI->isThumb1OnlyFunction() &&
1358             "This eliminateCallFramePseudoInstr does not support Thumb1!");
1359      bool isARM = !AFI->isThumbFunction();
1360
1361      // Replace the pseudo instruction with a new instruction...
1362      unsigned Opc = Old->getOpcode();
1363      int PIdx = Old->findFirstPredOperandIdx();
1364      ARMCC::CondCodes Pred = (PIdx == -1)
1365        ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
1366      if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
1367        // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
1368        unsigned PredReg = Old->getOperand(2).getReg();
1369        emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
1370      } else {
1371        // Note: PredReg is operand 3 for ADJCALLSTACKUP.
1372        unsigned PredReg = Old->getOperand(3).getReg();
1373        assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
1374        emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
1375      }
1376    }
1377  }
1378  MBB.erase(I);
1379}
1380
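/// getFrameIndexInstrOffset - Return the byte offset already encoded in MI's
/// addressing-mode operands for the frame index at operand Idx, taking the
/// mode's scale and add/sub flag into account.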
1381int64_t ARMBaseRegisterInfo::
1382getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
1383  const TargetInstrDesc &Desc = MI->getDesc();
1384  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
1385  int64_t InstrOffs = 0;
1386  int Scale = 1;
1387  unsigned ImmIdx = 0;
1388  switch (AddrMode) {
1389  case ARMII::AddrModeT2_i8:
1390  case ARMII::AddrModeT2_i12:
1391    // i8 supports only negative offsets and i12 supports only positive ones,
1392    // so based on the sign of Offset, consider the appropriate instruction.
1393    InstrOffs = MI->getOperand(Idx+1).getImm();
1394    Scale = 1;
1395    break;
1396  case ARMII::AddrMode5: {
1397    // VFP address mode.
1398    const MachineOperand &OffOp = MI->getOperand(Idx+1);
1399    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
1400    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
1401      InstrOffs = -InstrOffs;
1402    Scale = 4;
1403    break;
1404  }
1405  case ARMII::AddrMode2: {
1406    ImmIdx = Idx+2;
1407    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
1408    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1409      InstrOffs = -InstrOffs;
1410    break;
1411  }
1412  case ARMII::AddrMode3: {
1413    ImmIdx = Idx+2;
1414    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
1415    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1416      InstrOffs = -InstrOffs;
1417    break;
1418  }
1419  case ARMII::AddrModeT1_s: {
1420    ImmIdx = Idx+1;
1421    InstrOffs = MI->getOperand(ImmIdx).getImm();
1422    Scale = 4;
1423    break;
1424  }
1425  default:
1426    llvm_unreachable("Unsupported addressing mode!");
1427    break;
1428  }
1429
1430  return InstrOffs * Scale;
1431}
1432
1433/// needsFrameBaseReg - Returns true if the instruction's frame index
1434/// reference would be better served by a base register other than FP
1435/// or SP. Used by LocalStackFrameAllocation to determine which frame index
1436/// references it should create new base registers for.
1437bool ARMBaseRegisterInfo::
1438needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
1439  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
1440    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
1441  }
1442
1443  // It's the load/store FI references that cause issues, as it can be difficult
1444  // to materialize the offset if it won't fit in the literal field. Estimate
1445  // based on the size of the local frame and some conservative assumptions
1446  // about the rest of the stack frame (note, this is pre-regalloc, so
1447  // we don't know everything for certain yet) whether this offset is likely
1448  // to be out of range of the immediate. Return true if so.
1449
1450  // We only generate virtual base registers for loads and stores, so
1451  // return false for everything else.
1452  unsigned Opc = MI->getOpcode();
1453  switch (Opc) {
1454  case ARM::LDR: case ARM::LDRH: case ARM::LDRB:
1455  case ARM::STR: case ARM::STRH: case ARM::STRB:
1456  case ARM::t2LDRi12: case ARM::t2LDRi8:
1457  case ARM::t2STRi12: case ARM::t2STRi8:
1458  case ARM::VLDRS: case ARM::VLDRD:
1459  case ARM::VSTRS: case ARM::VSTRD:
1460  case ARM::tSTRspi: case ARM::tLDRspi:
1461    if (ForceAllBaseRegAlloc)
1462      return true;
1463    break;
1464  default:
1465    return false;
1466  }
1467
1468  // Without a virtual base register, if the function has variable-sized
1469  // objects, all fixed-size local references will be via the frame pointer.
1470  // Approximate the offset and see if it's legal for the instruction.
1471  // Note that the incoming offset is based on the SP value at function entry,
1472  // so it'll be negative.
1473  MachineFunction &MF = *MI->getParent()->getParent();
1474  MachineFrameInfo *MFI = MF.getFrameInfo();
1475  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1476
1477  // Estimate an offset from the frame pointer.
1478  // Conservatively assume all callee-saved registers get pushed. R4-R6
1479  // will be earlier than the FP, so we ignore those.
1480  // R7, LR
1481  int64_t FPOffset = Offset - 8;
1482  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
1483  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
1484    FPOffset -= 80;
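  // 80 bytes = R8-R11 (4 registers * 4 bytes) + D8-D15 (8 registers * 8 bytes).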
1485  // Estimate an offset from the stack pointer.
1486  Offset = -Offset;
1487  // Assume that we'll have at least some spill slots allocated.
1488  // FIXME: This is a total SWAG number. We should run some statistics
1489  //        and pick a real one.
1490  Offset += 128; // 128 bytes of spill slots
1491
1492  // If there is a frame pointer, try using it.
1493  // The FP is only available if there is no dynamic realignment. We
1494  // don't know for sure yet whether we'll need that, so we guess based
1495  // on whether there are any local variables that would trigger it.
1496  unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
1497  if (hasFP(MF) &&
1498      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
1499    if (isFrameOffsetLegal(MI, FPOffset))
1500      return false;
1501  }
1502  // If we can reference via the stack pointer, try that.
1503  // FIXME: This (and the code that resolves the references) can be improved
1504  //        to only disallow SP relative references in the live range of
1505  //        the VLA(s). In practice, it's unclear how much difference that
1506  //        would make, but it may be worth doing.
1507  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, Offset))
1508    return false;
1509
1510  // The offset likely isn't legal; we want to allocate a virtual base register.
1511  return true;
1512}
1513
1514/// materializeFrameBaseRegister - Insert defining instruction(s) for
1515/// BaseReg to be a pointer to FrameIdx before insertion point I.
1516void ARMBaseRegisterInfo::
1517materializeFrameBaseRegister(MachineBasicBlock::iterator I, unsigned BaseReg,
1518                             int FrameIdx, int64_t Offset) const {
1519  ARMFunctionInfo *AFI =
1520    I->getParent()->getParent()->getInfo<ARMFunctionInfo>();
1521  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
1522    (AFI->isThumb1OnlyFunction() ? ARM::tADDrSPi : ARM::t2ADDri);
1523
1524  MachineInstrBuilder MIB =
1525    BuildMI(*I->getParent(), I, I->getDebugLoc(), TII.get(ADDriOpc), BaseReg)
1526    .addFrameIndex(FrameIdx).addImm(Offset);
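  // Note: the Thumb1 tADDrSPi form used here takes no cc_out / predicate
  // operands, which is why they are only added for ARM and Thumb2 below.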
1527  if (!AFI->isThumb1OnlyFunction())
1528    AddDefaultCC(AddDefaultPred(MIB));
1529}
1530
1531void
1532ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
1533                                       unsigned BaseReg, int64_t Offset) const {
1534  MachineInstr &MI = *I;
1535  MachineBasicBlock &MBB = *MI.getParent();
1536  MachineFunction &MF = *MBB.getParent();
1537  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1538  int Off = Offset; // ARM doesn't need the general 64-bit offsets
1539  unsigned i = 0;
1540
1541  assert(!AFI->isThumb1OnlyFunction() &&
1542         "This resolveFrameIndex does not support Thumb1!");
1543
1544  while (!MI.getOperand(i).isFI()) {
1545    ++i;
1546    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
1547  }
1548  bool Done = false;
1549  if (!AFI->isThumbFunction())
1550    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
1551  else {
1552    assert(AFI->isThumb2Function());
1553    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
1554  }
1555  assert(Done && "Unable to resolve frame index!");
1556}
1557
1558bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
1559                                             int64_t Offset) const {
1560  const TargetInstrDesc &Desc = MI->getDesc();
1561  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
1562  unsigned i = 0;
1563
1564  while (!MI->getOperand(i).isFI()) {
1565    ++i;
1566    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
1567  }
1568
1569  // AddrMode4 and AddrMode6 cannot handle any offset.
1570  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
1571    return Offset == 0;
1572
1573  unsigned NumBits = 0;
1574  unsigned Scale = 1;
1575  bool isSigned = true;
1576  switch (AddrMode) {
1577  case ARMII::AddrModeT2_i8:
1578  case ARMII::AddrModeT2_i12:
1579    // i8 supports only negative, and i12 supports only positive, so
1580    // pick the appropriate form based on the sign of Offset.
1581    Scale = 1;
1582    if (Offset < 0) {
1583      NumBits = 8;
1584      Offset = -Offset;
1585    } else {
1586      NumBits = 12;
1587    }
1588    break;
1589  case ARMII::AddrMode5:
1590    // VFP address mode.
1591    NumBits = 8;
1592    Scale = 4;
1593    break;
1594  case ARMII::AddrMode2:
1595    NumBits = 12;
1596    break;
1597  case ARMII::AddrMode3:
1598    NumBits = 8;
1599    break;
1600  case ARMII::AddrModeT1_s:
1601    NumBits = 5;
1602    Scale = 4;
1603    isSigned = false;
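    // Note: the SP-relative tLDRspi / tSTRspi encodings actually allow an
    // 8-bit word-scaled immediate; 5 bits is a conservative estimate here.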
1604    break;
1605  default:
1606    llvm_unreachable("Unsupported addressing mode!");
1607    break;
1608  }
1609
1610  Offset += getFrameIndexInstrOffset(MI, i);
1611  assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
1612  if (isSigned && Offset < 0)
1613    Offset = -Offset;
1614
1615
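  // The largest encodable magnitude is Mask * Scale; e.g. AddrMode5 (VFP)
  // allows up to (2^8 - 1) * 4 = 1020 bytes, AddrMode2 up to 4095 bytes.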
1616  unsigned Mask = (1 << NumBits) - 1;
1617  if ((unsigned)Offset <= Mask * Scale)
1618    return true;
1619
1620  return false;
1621}
1622
1623unsigned
1624ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
1625                                         int SPAdj, FrameIndexValue *Value,
1626                                         RegScavenger *RS) const {
1627  unsigned i = 0;
1628  MachineInstr &MI = *II;
1629  MachineBasicBlock &MBB = *MI.getParent();
1630  MachineFunction &MF = *MBB.getParent();
1631  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1632  assert(!AFI->isThumb1OnlyFunction() &&
1633         "This eliminateFrameIndex does not support Thumb1!");
1634
1635  while (!MI.getOperand(i).isFI()) {
1636    ++i;
1637    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
1638  }
1639
1640  int FrameIndex = MI.getOperand(i).getIndex();
1641  unsigned FrameReg;
1642
1643  int Offset = ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);
1644
1645  // Special handling of dbg_value instructions.
1646  if (MI.isDebugValue()) {
1647    MI.getOperand(i).  ChangeToRegister(FrameReg, false /*isDef*/);
1648    MI.getOperand(i+1).ChangeToImmediate(Offset);
1649    return 0;
1650  }
1651
1652  // Modify MI as necessary to handle as much of 'Offset' as possible
1653  bool Done = false;
1654  if (!AFI->isThumbFunction())
1655    Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
1656  else {
1657    assert(AFI->isThumb2Function());
1658    Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
1659  }
1660  if (Done)
1661    return 0;
1662
1663  // If we get here, the immediate doesn't fit into the instruction.  We folded
1664  // as much as possible above, handle the rest, providing a register that is
1665  // SP+LargeImm.
1666  assert((Offset ||
1667          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
1668          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
1669         "This code isn't needed if offset already handled!");
1670
1671  unsigned ScratchReg = 0;
1672  int PIdx = MI.findFirstPredOperandIdx();
1673  ARMCC::CondCodes Pred = (PIdx == -1)
1674    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
1675  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
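  // Any add emitted below to materialize FrameReg+Offset is issued under the
  // same predicate as the original instruction, keeping predicated code
  // correct.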
1676  if (Offset == 0)
1677    // Must be addrmode4/6.
1678    MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
1679  else {
1680    ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
1681    if (Value) {
1682      Value->first = FrameReg; // use the frame register as a kind indicator
1683      Value->second = Offset;
1684    }
1685    if (!AFI->isThumbFunction())
1686      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
1687                              Offset, Pred, PredReg, TII);
1688    else {
1689      assert(AFI->isThumb2Function());
1690      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
1691                             Offset, Pred, PredReg, TII);
1692    }
1693    MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
1694    if (!ReuseFrameIndexVals)
1695      ScratchReg = 0;
1696  }
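  // When ReuseFrameIndexVals is enabled, the virtual scratch register
  // (together with *Value) is returned so the caller can reuse the
  // materialized FrameReg+Offset for later references to the same value.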
1697  return ScratchReg;
1698}
1699
1700/// Move iterator past the next bunch of callee save load / store ops for
1701/// the particular spill area (1: integer area 1, 2: integer area 2,
1702/// 3: fp area, 0: don't care).
1703static void movePastCSLoadStoreOps(MachineBasicBlock &MBB,
1704                                   MachineBasicBlock::iterator &MBBI,
1705                                   int Opc1, int Opc2, unsigned Area,
1706                                   const ARMSubtarget &STI) {
1707  while (MBBI != MBB.end() &&
1708         ((MBBI->getOpcode() == Opc1) || (MBBI->getOpcode() == Opc2)) &&
1709         MBBI->getOperand(1).isFI()) {
1710    if (Area != 0) {
1711      bool Done = false;
1712      unsigned Category = 0;
1713      switch (MBBI->getOperand(0).getReg()) {
1714      case ARM::R4:  case ARM::R5:  case ARM::R6: case ARM::R7:
1715      case ARM::LR:
1716        Category = 1;
1717        break;
1718      case ARM::R8:  case ARM::R9:  case ARM::R10: case ARM::R11:
1719        Category = STI.isTargetDarwin() ? 2 : 1;
1720        break;
1721      case ARM::D8:  case ARM::D9:  case ARM::D10: case ARM::D11:
1722      case ARM::D12: case ARM::D13: case ARM::D14: case ARM::D15:
1723        Category = 3;
1724        break;
1725      default:
1726        Done = true;
1727        break;
1728      }
1729      if (Done || Category != Area)
1730        break;
1731    }
1732
1733    ++MBBI;
1734  }
1735}
1736
1737void ARMBaseRegisterInfo::
1738emitPrologue(MachineFunction &MF) const {
1739  MachineBasicBlock &MBB = MF.front();
1740  MachineBasicBlock::iterator MBBI = MBB.begin();
1741  MachineFrameInfo  *MFI = MF.getFrameInfo();
1742  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1743  assert(!AFI->isThumb1OnlyFunction() &&
1744         "This emitPrologue does not support Thumb1!");
1745  bool isARM = !AFI->isThumbFunction();
1746  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
1747  unsigned NumBytes = MFI->getStackSize();
1748  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
1749  DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
1750
1751  // Determine the size of each callee-save spill area and record which frame
1752  // index belongs to which area.
1753  unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
1754  int FramePtrSpillFI = 0;
1755
1756  // Allocate the vararg register save area. This is not counted in NumBytes.
1757  if (VARegSaveSize)
1758    emitSPUpdate(isARM, MBB, MBBI, dl, TII, -VARegSaveSize);
1759
1760  if (!AFI->hasStackFrame()) {
1761    if (NumBytes != 0)
1762      emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
1763    return;
1764  }
1765
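  // On Darwin the callee-saved GPRs are split into two areas (R4-R7/LR, then
  // R8-R11), matching the Darwin ABI where R7 is the frame pointer; on other
  // targets all GPRs go in area 1. D8-D15 always go in the separate DPR area.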
1766  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1767    unsigned Reg = CSI[i].getReg();
1768    int FI = CSI[i].getFrameIdx();
1769    switch (Reg) {
1770    case ARM::R4:
1771    case ARM::R5:
1772    case ARM::R6:
1773    case ARM::R7:
1774    case ARM::LR:
1775      if (Reg == FramePtr)
1776        FramePtrSpillFI = FI;
1777      AFI->addGPRCalleeSavedArea1Frame(FI);
1778      GPRCS1Size += 4;
1779      break;
1780    case ARM::R8:
1781    case ARM::R9:
1782    case ARM::R10:
1783    case ARM::R11:
1784      if (Reg == FramePtr)
1785        FramePtrSpillFI = FI;
1786      if (STI.isTargetDarwin()) {
1787        AFI->addGPRCalleeSavedArea2Frame(FI);
1788        GPRCS2Size += 4;
1789      } else {
1790        AFI->addGPRCalleeSavedArea1Frame(FI);
1791        GPRCS1Size += 4;
1792      }
1793      break;
1794    default:
1795      AFI->addDPRCalleeSavedAreaFrame(FI);
1796      DPRCSSize += 8;
1797    }
1798  }
1799
1800  // Build the new SUBri to adjust SP for integer callee-save spill area 1.
1801  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS1Size);
1802  movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 1, STI);
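  // Note: the callee-save stores themselves were inserted earlier by the
  // prologue/epilogue inserter; the movePastCSLoadStoreOps calls here only
  // advance MBBI past each area's stores so the SP adjustments end up
  // interleaved between the areas.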
1803
1804  // Set FP to point to the stack slot that contains the previous FP.
1805  // For Darwin, FP is R7, which has now been stored in spill area 1.
1806  // Otherwise, if this is not Darwin, all the callee-saved registers go
1807  // into spill area 1, including the FP in R11.  In either case, it is
1808  // now safe to emit this assignment.
1809  bool HasFP = hasFP(MF);
1810  if (HasFP) {
1811    unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri : ARM::t2ADDri;
1812    MachineInstrBuilder MIB =
1813      BuildMI(MBB, MBBI, dl, TII.get(ADDriOpc), FramePtr)
1814      .addFrameIndex(FramePtrSpillFI).addImm(0);
1815    AddDefaultCC(AddDefaultPred(MIB));
1816  }
1817
1818  // Build the new SUBri to adjust SP for integer callee-save spill area 2.
1819  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS2Size);
1820  movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 2, STI);
1821
1822  // Build the new SUBri to adjust SP for FP callee-save spill area.
1823  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRCSSize);
1824
1825  // Determine starting offsets of spill areas.
1826  unsigned DPRCSOffset  = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
1827  unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
1828  unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
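  // Example (sizes assumed for illustration): NumBytes = 64, GPRCS1Size = 20,
  // GPRCS2Size = 0, DPRCSSize = 16 gives DPRCSOffset = 28 and
  // GPRCS2Offset = GPRCS1Offset = 44.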
1829  if (HasFP)
1830    AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) +
1831                                NumBytes);
1832  AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
1833  AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
1834  AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
1835
1836  movePastCSLoadStoreOps(MBB, MBBI, ARM::VSTRD, 0, 3, STI);
1837  NumBytes = DPRCSOffset;
1838  if (NumBytes) {
1839    // Adjust SP after all the callee-save spills.
1840    emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
1841    if (HasFP)
1842      AFI->setShouldRestoreSPFromFP(true);
1843  }
1844
1845  if (STI.isTargetELF() && hasFP(MF)) {
1846    MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
1847                             AFI->getFramePtrSpillOffset());
1848    AFI->setShouldRestoreSPFromFP(true);
1849  }
1850
1851  AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
1852  AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
1853  AFI->setDPRCalleeSavedAreaSize(DPRCSSize);
1854
1855  // If we need dynamic stack realignment, do it here.
1856  if (needsStackRealignment(MF)) {
1857    unsigned MaxAlign = MFI->getMaxAlignment();
1858    assert(!AFI->isThumb1OnlyFunction());
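    // MaxAlign is a power of two, so masking with MaxAlign-1 (the "bic")
    // rounds SP down to the requested alignment.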
1859    if (!AFI->isThumbFunction()) {
1860      // Emit bic sp, sp, MaxAlign
1861      AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
1862                                          TII.get(ARM::BICri), ARM::SP)
1863                                  .addReg(ARM::SP, RegState::Kill)
1864                                  .addImm(MaxAlign-1)));
1865    } else {
1866      // We cannot use sp as the source/dest register here, so we emit the
1867      // following sequence:
1868      // mov r4, sp
1869      // bic r4, r4, MaxAlign
1870      // mov sp, r4
1871      // FIXME: It would be better just to find a spare register here.
1872      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2tgpr), ARM::R4)
1873        .addReg(ARM::SP, RegState::Kill);
1874      AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
1875                                          TII.get(ARM::t2BICri), ARM::R4)
1876                                  .addReg(ARM::R4, RegState::Kill)
1877                                  .addImm(MaxAlign-1)));
1878      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
1879        .addReg(ARM::R4, RegState::Kill);
1880    }
1881
1882    AFI->setShouldRestoreSPFromFP(true);
1883  }
1884
1885  // If the frame has variable-sized objects, then the epilogue must restore
1886  // SP from FP.
1887  if (!AFI->shouldRestoreSPFromFP() && MFI->hasVarSizedObjects())
1888    AFI->setShouldRestoreSPFromFP(true);
1889}
1890
1891static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
1892  for (unsigned i = 0; CSRegs[i]; ++i)
1893    if (Reg == CSRegs[i])
1894      return true;
1895  return false;
1896}
1897
1898static bool isCSRestore(MachineInstr *MI,
1899                        const ARMBaseInstrInfo &TII,
1900                        const unsigned *CSRegs) {
1901  return ((MI->getOpcode() == (int)ARM::VLDRD ||
1902           MI->getOpcode() == (int)ARM::LDR ||
1903           MI->getOpcode() == (int)ARM::t2LDRi12) &&
1904          MI->getOperand(1).isFI() &&
1905          isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs));
1906}
1907
1908void ARMBaseRegisterInfo::
1909emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
1910  MachineBasicBlock::iterator MBBI = prior(MBB.end());
1911  assert(MBBI->getDesc().isReturn() &&
1912         "Can only insert epilog into returning blocks");
1913  unsigned RetOpcode = MBBI->getOpcode();
1914  DebugLoc dl = MBBI->getDebugLoc();
1915  MachineFrameInfo *MFI = MF.getFrameInfo();
1916  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1917  assert(!AFI->isThumb1OnlyFunction() &&
1918         "This emitEpilogue does not support Thumb1!");
1919  bool isARM = !AFI->isThumbFunction();
1920
1921  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
1922  int NumBytes = (int)MFI->getStackSize();
1923
1924  if (!AFI->hasStackFrame()) {
1925    if (NumBytes != 0)
1926      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
1927  } else {
1928    // Unwind MBBI to point to first LDR / VLDRD.
1929    const unsigned *CSRegs = getCalleeSavedRegs();
1930    if (MBBI != MBB.begin()) {
1931      do
1932        --MBBI;
1933      while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
1934      if (!isCSRestore(MBBI, TII, CSRegs))
1935        ++MBBI;
1936    }
1937
1938    // Move SP to start of FP callee save spill area.
1939    NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
1940                 AFI->getGPRCalleeSavedArea2Size() +
1941                 AFI->getDPRCalleeSavedAreaSize());
1942
1943    // Reset SP from the frame pointer only if the stack frame extends beyond
1944    // the FP stack slot, or the target is ELF and the function has an FP.
1945    if (AFI->shouldRestoreSPFromFP()) {
1946      NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
1947      if (NumBytes) {
1948        if (isARM)
1949          emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
1950                                  ARMCC::AL, 0, TII);
1951        else
1952          emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
1953                                 ARMCC::AL, 0, TII);
1954      } else {
1955        // Thumb2 or ARM.
1956        if (isARM)
1957          BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
1958            .addReg(FramePtr).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
1959        else
1960          BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP)
1961            .addReg(FramePtr);
1962      }
1963    } else if (NumBytes)
1964      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
1965
1966    // Move SP to start of integer callee save spill area 2.
1967    movePastCSLoadStoreOps(MBB, MBBI, ARM::VLDRD, 0, 3, STI);
1968    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getDPRCalleeSavedAreaSize());
1969
1970    // Move SP to start of integer callee save spill area 1.
1971    movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 2, STI);
1972    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea2Size());
1973
1974    // Restore SP to its value upon entry to the function.
1975    movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 1, STI);
1976    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea1Size());
1977  }
1978
1979  if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNdiND ||
1980      RetOpcode == ARM::TCRETURNri || RetOpcode == ARM::TCRETURNriND) {
1981    // Tail call return: adjust the stack pointer and jump to callee.
1982    MBBI = prior(MBB.end());
1983    MachineOperand &JumpTarget = MBBI->getOperand(0);
1984
1985    // Jump to label or value in register.
1986    if (RetOpcode == ARM::TCRETURNdi) {
1987      BuildMI(MBB, MBBI, dl,
1988            TII.get(STI.isThumb() ? ARM::TAILJMPdt : ARM::TAILJMPd)).
1989        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
1990                         JumpTarget.getTargetFlags());
1991    } else if (RetOpcode == ARM::TCRETURNdiND) {
1992      BuildMI(MBB, MBBI, dl,
1993            TII.get(STI.isThumb() ? ARM::TAILJMPdNDt : ARM::TAILJMPdND)).
1994        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
1995                         JumpTarget.getTargetFlags());
1996    } else if (RetOpcode == ARM::TCRETURNri) {
1997      BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPr)).
1998        addReg(JumpTarget.getReg(), RegState::Kill);
1999    } else if (RetOpcode == ARM::TCRETURNriND) {
2000      BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPrND)).
2001        addReg(JumpTarget.getReg(), RegState::Kill);
2002    }
2003
2004    MachineInstr *NewMI = prior(MBBI);
2005    for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
2006      NewMI->addOperand(MBBI->getOperand(i));
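    // The remaining operands on the TCRETURN pseudo are typically implicit
    // uses (e.g. argument registers); copying them onto the real tail-call
    // branch preserves that liveness information.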
2007
2008    // Delete the pseudo instruction TCRETURN.
2009    MBB.erase(MBBI);
2010  }
2011
2012  if (VARegSaveSize)
2013    emitSPUpdate(isARM, MBB, MBBI, dl, TII, VARegSaveSize);
2014}
2015
2016#include "ARMGenRegisterInfo.inc"
2017