ARMBaseRegisterInfo.cpp revision e1e6d187863ad7ca2e5331f496f27d480cb39734
1//===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the base ARM implementation of TargetRegisterInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "ARM.h"
15#include "ARMAddressingModes.h"
16#include "ARMBaseInstrInfo.h"
17#include "ARMBaseRegisterInfo.h"
18#include "ARMInstrInfo.h"
19#include "ARMMachineFunctionInfo.h"
20#include "ARMSubtarget.h"
21#include "llvm/Constants.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/Function.h"
24#include "llvm/LLVMContext.h"
25#include "llvm/CodeGen/MachineConstantPool.h"
26#include "llvm/CodeGen/MachineFrameInfo.h"
27#include "llvm/CodeGen/MachineFunction.h"
28#include "llvm/CodeGen/MachineInstrBuilder.h"
29#include "llvm/CodeGen/MachineLocation.h"
30#include "llvm/CodeGen/MachineRegisterInfo.h"
31#include "llvm/CodeGen/RegisterScavenging.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Support/raw_ostream.h"
35#include "llvm/Target/TargetFrameInfo.h"
36#include "llvm/Target/TargetMachine.h"
37#include "llvm/Target/TargetOptions.h"
38#include "llvm/ADT/BitVector.h"
39#include "llvm/ADT/SmallVector.h"
40#include "llvm/Support/CommandLine.h"
41
namespace llvm {
// Debugging/testing knob: force every stack load/store to be materialized
// through a virtual base register rather than SP/FP-relative addressing.
static cl::opt<bool>
ForceAllBaseRegAlloc("arm-force-base-reg-alloc", cl::Hidden, cl::init(false),
          cl::desc("Force use of virtual base registers for stack load/store"));
// Enables pre-register-allocation placement of local stack objects
// (defaults to on).
static cl::opt<bool>
EnableLocalStackAlloc("enable-local-stack-alloc", cl::init(true), cl::Hidden,
          cl::desc("Enable pre-regalloc stack frame index allocation"));
}
50
51using namespace llvm;
52
// Off by default: when enabled, functions with complex frames (realigned
// stack plus variable-sized objects, or Thumb frames that can't reach their
// locals) may reserve a dedicated base pointer register (see BasePtr, R6).
static cl::opt<bool>
EnableBasePointer("arm-use-base-pointer", cl::Hidden, cl::init(false),
          cl::desc("Enable use of a base pointer for complex stack frames"));
56
/// getRegisterNumbering - Map a tblgen register enum value (e.g. ARM::LR) to
/// its hardware encoding (e.g. 14).  The S (single-precision VFP) registers
/// have their own 0-31 numbering that overlaps the GPR/D/Q space, so when
/// isSPVFP is non-null it is set to true for S registers and false otherwise
/// so callers can disambiguate.
unsigned ARMBaseRegisterInfo::getRegisterNumbering(unsigned RegEnum,
                                                   bool *isSPVFP) {
  if (isSPVFP)
    *isSPVFP = false;

  using namespace ARM;
  switch (RegEnum) {
  default:
    llvm_unreachable("Unknown ARM register!");
  // GPRs, D registers and Q registers share the same 0-15 numbering.
  case R0:  case D0:  case Q0:  return 0;
  case R1:  case D1:  case Q1:  return 1;
  case R2:  case D2:  case Q2:  return 2;
  case R3:  case D3:  case Q3:  return 3;
  case R4:  case D4:  case Q4:  return 4;
  case R5:  case D5:  case Q5:  return 5;
  case R6:  case D6:  case Q6:  return 6;
  case R7:  case D7:  case Q7:  return 7;
  case R8:  case D8:  case Q8:  return 8;
  case R9:  case D9:  case Q9:  return 9;
  case R10: case D10: case Q10: return 10;
  case R11: case D11: case Q11: return 11;
  case R12: case D12: case Q12: return 12;
  case SP:  case D13: case Q13: return 13;
  case LR:  case D14: case Q14: return 14;
  case PC:  case D15: case Q15: return 15;

  // D16-D31 only exist on VFPv3/NEON-capable cores.
  case D16: return 16;
  case D17: return 17;
  case D18: return 18;
  case D19: return 19;
  case D20: return 20;
  case D21: return 21;
  case D22: return 22;
  case D23: return 23;
  case D24: return 24;
  case D25: return 25;
  case D26: return 26;
  case D27: return 27;
  case D28: return 28;
  case D29: return 29;
  case D30: return 30;
  case D31: return 31;

  // Single-precision VFP registers: report via *isSPVFP that the returned
  // number is in the separate S-register space.
  case S0: case S1: case S2: case S3:
  case S4: case S5: case S6: case S7:
  case S8: case S9: case S10: case S11:
  case S12: case S13: case S14: case S15:
  case S16: case S17: case S18: case S19:
  case S20: case S21: case S22: case S23:
  case S24: case S25: case S26: case S27:
  case S28: case S29: case S30: case S31: {
    if (isSPVFP)
      *isSPVFP = true;
    switch (RegEnum) {
    default: return 0; // Avoid compile time warning.
    case S0: return 0;
    case S1: return 1;
    case S2: return 2;
    case S3: return 3;
    case S4: return 4;
    case S5: return 5;
    case S6: return 6;
    case S7: return 7;
    case S8: return 8;
    case S9: return 9;
    case S10: return 10;
    case S11: return 11;
    case S12: return 12;
    case S13: return 13;
    case S14: return 14;
    case S15: return 15;
    case S16: return 16;
    case S17: return 17;
    case S18: return 18;
    case S19: return 19;
    case S20: return 20;
    case S21: return 21;
    case S22: return 22;
    case S23: return 23;
    case S24: return 24;
    case S25: return 25;
    case S26: return 26;
    case S27: return 27;
    case S28: return 28;
    case S29: return 29;
    case S30: return 30;
    case S31: return 31;
    }
  }
  }
}
148
// Darwin and Thumb targets use R7 as the frame pointer; other ARM targets
// use R11.  R6 is the register reserved as the base pointer whenever
// hasBasePointer() decides one is needed.
ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
                                         const ARMSubtarget &sti)
  : ARMGenRegisterInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    TII(tii), STI(sti),
    FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
    BasePtr(ARM::R6) {
}
156
157const unsigned*
158ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
159  static const unsigned CalleeSavedRegs[] = {
160    ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
161    ARM::R7, ARM::R6,  ARM::R5,  ARM::R4,
162
163    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
164    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
165    0
166  };
167
168  static const unsigned DarwinCalleeSavedRegs[] = {
169    // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved
170    // register.
171    ARM::LR,  ARM::R7,  ARM::R6, ARM::R5, ARM::R4,
172    ARM::R11, ARM::R10, ARM::R8,
173
174    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
175    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
176    0
177  };
178  return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
179}
180
181BitVector ARMBaseRegisterInfo::
182getReservedRegs(const MachineFunction &MF) const {
183  // FIXME: avoid re-calculating this everytime.
184  BitVector Reserved(getNumRegs());
185  Reserved.set(ARM::SP);
186  Reserved.set(ARM::PC);
187  Reserved.set(ARM::FPSCR);
188  if (hasFP(MF))
189    Reserved.set(FramePtr);
190  if (hasBasePointer(MF))
191    Reserved.set(BasePtr);
192  // Some targets reserve R9.
193  if (STI.isR9Reserved())
194    Reserved.set(ARM::R9);
195  return Reserved;
196}
197
198bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
199                                        unsigned Reg) const {
200  switch (Reg) {
201  default: break;
202  case ARM::SP:
203  case ARM::PC:
204    return true;
205  case ARM::R6:
206    if (hasBasePointer(MF))
207      return true;
208    break;
209  case ARM::R7:
210  case ARM::R11:
211    if (FramePtr == Reg && hasFP(MF))
212      return true;
213    break;
214  case ARM::R9:
215    return STI.isR9Reserved();
216  }
217
218  return false;
219}
220
/// getMatchingSuperRegClass - Return the largest super-register class of A
/// such that every register in it has a SubIdx sub-register belonging to B,
/// or null to forbid the register coalescer from joining across this
/// sub-register index.  Class sizes here are in bytes: 8 = D, 16 = Q,
/// 32 = QQ, 64 = QQQQ.
const TargetRegisterClass *
ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                              const TargetRegisterClass *B,
                                              unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case ARM::ssub_0:
  case ARM::ssub_1:
  case ARM::ssub_2:
  case ARM::ssub_3: {
    // S sub-registers.
    // Only the VFP2-addressable subset of D/Q registers (D0-D15) have named
    // S sub-registers, hence the _VFP2 restricted classes below.
    if (A->getSize() == 8) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::DPR_8RegClass;
      assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
      if (A == &ARM::DPR_8RegClass)
        return A;
      return &ARM::DPR_VFP2RegClass;
    }

    if (A->getSize() == 16) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::QPR_8RegClass;
      return &ARM::QPR_VFP2RegClass;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::SPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return &ARM::QQPR_VFP2RegClass;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    return 0;  // Do not allow coalescing!
  }
  case ARM::dsub_0:
  case ARM::dsub_1:
  case ARM::dsub_2:
  case ARM::dsub_3: {
    // D sub-registers.
    if (A->getSize() == 16) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B != &ARM::DPRRegClass)
      return 0;  // Do not allow coalescing!
    return A;
  }
  case ARM::dsub_4:
  case ARM::dsub_5:
  case ARM::dsub_6:
  case ARM::dsub_7: {
    // D sub-registers of QQQQ registers.
    // Indices 4-7 only exist on the 64-byte QQQQ classes.
    if (A->getSize() == 64 && B == &ARM::DPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }

  case ARM::qsub_0:
  case ARM::qsub_1: {
    // Q sub-registers.
    if (A->getSize() == 32) {
      if (B == &ARM::QPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::QPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  case ARM::qsub_2:
  case ARM::qsub_3: {
    // Q sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  }
  // Not reached: every switch case returns.  Kept to silence compilers that
  // can't prove the switch is exhaustive.
  return 0;
}
318
319bool
320ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
321                                          SmallVectorImpl<unsigned> &SubIndices,
322                                          unsigned &NewSubIdx) const {
323
324  unsigned Size = RC->getSize() * 8;
325  if (Size < 6)
326    return 0;
327
328  NewSubIdx = 0;  // Whole register.
329  unsigned NumRegs = SubIndices.size();
330  if (NumRegs == 8) {
331    // 8 D registers -> 1 QQQQ register.
332    return (Size == 512 &&
333            SubIndices[0] == ARM::dsub_0 &&
334            SubIndices[1] == ARM::dsub_1 &&
335            SubIndices[2] == ARM::dsub_2 &&
336            SubIndices[3] == ARM::dsub_3 &&
337            SubIndices[4] == ARM::dsub_4 &&
338            SubIndices[5] == ARM::dsub_5 &&
339            SubIndices[6] == ARM::dsub_6 &&
340            SubIndices[7] == ARM::dsub_7);
341  } else if (NumRegs == 4) {
342    if (SubIndices[0] == ARM::qsub_0) {
343      // 4 Q registers -> 1 QQQQ register.
344      return (Size == 512 &&
345              SubIndices[1] == ARM::qsub_1 &&
346              SubIndices[2] == ARM::qsub_2 &&
347              SubIndices[3] == ARM::qsub_3);
348    } else if (SubIndices[0] == ARM::dsub_0) {
349      // 4 D registers -> 1 QQ register.
350      if (Size >= 256 &&
351          SubIndices[1] == ARM::dsub_1 &&
352          SubIndices[2] == ARM::dsub_2 &&
353          SubIndices[3] == ARM::dsub_3) {
354        if (Size == 512)
355          NewSubIdx = ARM::qqsub_0;
356        return true;
357      }
358    } else if (SubIndices[0] == ARM::dsub_4) {
359      // 4 D registers -> 1 QQ register (2nd).
360      if (Size == 512 &&
361          SubIndices[1] == ARM::dsub_5 &&
362          SubIndices[2] == ARM::dsub_6 &&
363          SubIndices[3] == ARM::dsub_7) {
364        NewSubIdx = ARM::qqsub_1;
365        return true;
366      }
367    } else if (SubIndices[0] == ARM::ssub_0) {
368      // 4 S registers -> 1 Q register.
369      if (Size >= 128 &&
370          SubIndices[1] == ARM::ssub_1 &&
371          SubIndices[2] == ARM::ssub_2 &&
372          SubIndices[3] == ARM::ssub_3) {
373        if (Size >= 256)
374          NewSubIdx = ARM::qsub_0;
375        return true;
376      }
377    }
378  } else if (NumRegs == 2) {
379    if (SubIndices[0] == ARM::qsub_0) {
380      // 2 Q registers -> 1 QQ register.
381      if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
382        if (Size == 512)
383          NewSubIdx = ARM::qqsub_0;
384        return true;
385      }
386    } else if (SubIndices[0] == ARM::qsub_2) {
387      // 2 Q registers -> 1 QQ register (2nd).
388      if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
389        NewSubIdx = ARM::qqsub_1;
390        return true;
391      }
392    } else if (SubIndices[0] == ARM::dsub_0) {
393      // 2 D registers -> 1 Q register.
394      if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
395        if (Size >= 256)
396          NewSubIdx = ARM::qsub_0;
397        return true;
398      }
399    } else if (SubIndices[0] == ARM::dsub_2) {
400      // 2 D registers -> 1 Q register (2nd).
401      if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
402        NewSubIdx = ARM::qsub_1;
403        return true;
404      }
405    } else if (SubIndices[0] == ARM::dsub_4) {
406      // 2 D registers -> 1 Q register (3rd).
407      if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
408        NewSubIdx = ARM::qsub_2;
409        return true;
410      }
411    } else if (SubIndices[0] == ARM::dsub_6) {
412      // 2 D registers -> 1 Q register (3rd).
413      if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
414        NewSubIdx = ARM::qsub_3;
415        return true;
416      }
417    } else if (SubIndices[0] == ARM::ssub_0) {
418      // 2 S registers -> 1 D register.
419      if (SubIndices[1] == ARM::ssub_1) {
420        if (Size >= 128)
421          NewSubIdx = ARM::dsub_0;
422        return true;
423      }
424    } else if (SubIndices[0] == ARM::ssub_2) {
425      // 2 S registers -> 1 D register (2nd).
426      if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
427        NewSubIdx = ARM::dsub_1;
428        return true;
429      }
430    }
431  }
432  return false;
433}
434
435
/// getPointerRegClass - Pointers are held in general-purpose registers on
/// ARM; Kind is ignored since there is only one pointer register class.
const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
  return ARM::GPRRegisterClass;
}
440
/// getAllocationOrder - Returns the register allocation order for a specified
/// register class in the form of a pair of TargetRegisterClass iterators.
/// When the hint asks for the even (or odd) half of a register pair, an
/// alternative order is returned that tries even (odd) registers first.  Six
/// static tables cover the combinations of {no FP, FP=R7, FP=R11} x
/// {R9 available, R9 reserved}; each table lists the preferred-parity
/// registers first, then the remaining allocatable GPRs.
std::pair<TargetRegisterClass::iterator,TargetRegisterClass::iterator>
ARMBaseRegisterInfo::getAllocationOrder(const TargetRegisterClass *RC,
                                        unsigned HintType, unsigned HintReg,
                                        const MachineFunction &MF) const {
  // Alternative register allocation orders when favoring even / odd registers
  // of register pairs.

  // No FP, R9 is available.
  static const unsigned GPREven1[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd1[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R7, R9 is available.
  static const unsigned GPREven2[] = {
    ARM::R0, ARM::R2, ARM::R4,          ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd2[] = {
    ARM::R1, ARM::R3, ARM::R5,          ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R11, R9 is available.
  static const unsigned GPREven3[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9
  };
  // NOTE(review): R6 below sits among the odd-first entries while R7 appears
  // only in the fallback list -- this looks swapped relative to GPREven3
  // (R6 is an even register).  Confirm against the register-pairing tables
  // before changing, since any edit alters allocation order.
  static const unsigned GPROdd3[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
    ARM::R8
  };

  // No FP, R9 is not available.
  static const unsigned GPREven4[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6,          ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd4[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7,          ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R7, R9 is not available.
  static const unsigned GPREven5[] = {
    ARM::R0, ARM::R2, ARM::R4,                   ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd5[] = {
    ARM::R1, ARM::R3, ARM::R5,                   ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R11, R9 is not available.
  static const unsigned GPREven6[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
  };
  static const unsigned GPROdd6[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
  };


  if (HintType == ARMRI::RegPairEven) {
    if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return std::make_pair(RC->allocation_order_begin(MF),
                            RC->allocation_order_end(MF));

    if (!hasFP(MF)) {
      if (!STI.isR9Reserved())
        return std::make_pair(GPREven1,
                              GPREven1 + (sizeof(GPREven1)/sizeof(unsigned)));
      else
        return std::make_pair(GPREven4,
                              GPREven4 + (sizeof(GPREven4)/sizeof(unsigned)));
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return std::make_pair(GPREven2,
                              GPREven2 + (sizeof(GPREven2)/sizeof(unsigned)));
      else
        return std::make_pair(GPREven5,
                              GPREven5 + (sizeof(GPREven5)/sizeof(unsigned)));
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return std::make_pair(GPREven3,
                              GPREven3 + (sizeof(GPREven3)/sizeof(unsigned)));
      else
        return std::make_pair(GPREven6,
                              GPREven6 + (sizeof(GPREven6)/sizeof(unsigned)));
    }
  } else if (HintType == ARMRI::RegPairOdd) {
    if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return std::make_pair(RC->allocation_order_begin(MF),
                            RC->allocation_order_end(MF));

    if (!hasFP(MF)) {
      if (!STI.isR9Reserved())
        return std::make_pair(GPROdd1,
                              GPROdd1 + (sizeof(GPROdd1)/sizeof(unsigned)));
      else
        return std::make_pair(GPROdd4,
                              GPROdd4 + (sizeof(GPROdd4)/sizeof(unsigned)));
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return std::make_pair(GPROdd2,
                              GPROdd2 + (sizeof(GPROdd2)/sizeof(unsigned)));
      else
        return std::make_pair(GPROdd5,
                              GPROdd5 + (sizeof(GPROdd5)/sizeof(unsigned)));
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return std::make_pair(GPROdd3,
                              GPROdd3 + (sizeof(GPROdd3)/sizeof(unsigned)));
      else
        return std::make_pair(GPROdd6,
                              GPROdd6 + (sizeof(GPROdd6)/sizeof(unsigned)));
    }
  }
  // No pairing hint: use the register class's default order.
  return std::make_pair(RC->allocation_order_begin(MF),
                        RC->allocation_order_end(MF));
}
583
584/// ResolveRegAllocHint - Resolves the specified register allocation hint
585/// to a physical register. Returns the physical register if it is successful.
586unsigned
587ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
588                                         const MachineFunction &MF) const {
589  if (Reg == 0 || !isPhysicalRegister(Reg))
590    return 0;
591  if (Type == 0)
592    return Reg;
593  else if (Type == (unsigned)ARMRI::RegPairOdd)
594    // Odd register.
595    return getRegisterPairOdd(Reg, MF);
596  else if (Type == (unsigned)ARMRI::RegPairEven)
597    // Even register.
598    return getRegisterPairEven(Reg, MF);
599  return 0;
600}
601
602void
603ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
604                                        MachineFunction &MF) const {
605  MachineRegisterInfo *MRI = &MF.getRegInfo();
606  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
607  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
608       Hint.first == (unsigned)ARMRI::RegPairEven) &&
609      Hint.second && TargetRegisterInfo::isVirtualRegister(Hint.second)) {
610    // If 'Reg' is one of the even / odd register pair and it's now changed
611    // (e.g. coalesced) into a different register. The other register of the
612    // pair allocation hint must be updated to reflect the relationship
613    // change.
614    unsigned OtherReg = Hint.second;
615    Hint = MRI->getRegAllocationHint(OtherReg);
616    if (Hint.second == Reg)
617      // Make sure the pair has not already divorced.
618      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
619  }
620}
621
622/// hasFP - Return true if the specified function should have a dedicated frame
623/// pointer register.  This is true if the function has variable sized allocas
624/// or if frame pointer elimination is disabled.
625///
626bool ARMBaseRegisterInfo::hasFP(const MachineFunction &MF) const {
627  // Mac OS X requires FP not to be clobbered for backtracing purpose.
628  if (STI.isTargetDarwin())
629    return true;
630
631  const MachineFrameInfo *MFI = MF.getFrameInfo();
632  // Always eliminate non-leaf frame pointers.
633  return ((DisableFramePointerElim(MF) && MFI->hasCalls()) ||
634          needsStackRealignment(MF) ||
635          MFI->hasVarSizedObjects() ||
636          MFI->isFrameAddressTaken());
637}
638
639bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
640  const MachineFrameInfo *MFI = MF.getFrameInfo();
641  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
642
643  if (!EnableBasePointer)
644    return false;
645
646  if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
647    return true;
648
649  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
650  // negative range for ldr/str (255), and thumb1 is positive offsets only.
651  // It's going to be better to use the SP or Base Pointer instead. When there
652  // are variable sized objects, we can't reference off of the SP, so we
653  // reserve a Base Pointer.
654  if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
655    // Conservatively estimate whether the negative offset from the frame
656    // pointer will be sufficient to reach. If a function has a smallish
657    // frame, it's less likely to have lots of spills and callee saved
658    // space, so it's all more likely to be within range of the frame pointer.
659    // If it's wrong, the scavenger will still enable access to work, it just
660    // won't be optimal.
661    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
662      return false;
663    return true;
664  }
665
666  return false;
667}
668
669bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
670  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
671  return (RealignStack && !AFI->isThumb1OnlyFunction());
672}
673
674bool ARMBaseRegisterInfo::
675needsStackRealignment(const MachineFunction &MF) const {
676  const MachineFrameInfo *MFI = MF.getFrameInfo();
677  const Function *F = MF.getFunction();
678  unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
679  bool requiresRealignment = ((MFI->getLocalFrameMaxAlign() > StackAlign) ||
680                               F->hasFnAttr(Attribute::StackAlignment));
681
682  return requiresRealignment && canRealignStack(MF);
683}
684
685bool ARMBaseRegisterInfo::
686cannotEliminateFrame(const MachineFunction &MF) const {
687  const MachineFrameInfo *MFI = MF.getFrameInfo();
688  if (DisableFramePointerElim(MF) && MFI->adjustsStack())
689    return true;
690  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
691    || needsStackRealignment(MF);
692}
693
694/// estimateStackSize - Estimate and return the size of the frame.
695static unsigned estimateStackSize(MachineFunction &MF) {
696  const MachineFrameInfo *FFI = MF.getFrameInfo();
697  int Offset = 0;
698  for (int i = FFI->getObjectIndexBegin(); i != 0; ++i) {
699    int FixedOff = -FFI->getObjectOffset(i);
700    if (FixedOff > Offset) Offset = FixedOff;
701  }
702  for (unsigned i = 0, e = FFI->getObjectIndexEnd(); i != e; ++i) {
703    if (FFI->isDeadObjectIndex(i))
704      continue;
705    Offset += FFI->getObjectSize(i);
706    unsigned Align = FFI->getObjectAlignment(i);
707    // Adjust to alignment boundary
708    Offset = (Offset+Align-1)/Align*Align;
709  }
710  return (unsigned)Offset;
711}
712
/// estimateRSStackSizeLimit - Look at each instruction that references stack
/// frames and return the stack size limit beyond which some of these
/// instructions will require a scratch register during their expansion later.
unsigned
ARMBaseRegisterInfo::estimateRSStackSizeLimit(MachineFunction &MF) const {
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // Start from the widest immediate used here: 12 bits (4095).
  unsigned Limit = (1 << 12) - 1;
  for (MachineFunction::iterator BB = MF.begin(),E = MF.end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      // Scan operands for a frame index; the limit is tightened according to
      // the addressing mode of each instruction that uses one.
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (!I->getOperand(i).isFI()) continue;

        // When using ADDri to get the address of a stack object, 255 is the
        // largest offset guaranteed to fit in the immediate offset.
        if (I->getOpcode() == ARM::ADDri) {
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        }

        // Otherwise check the addressing mode.
        switch (I->getDesc().TSFlags & ARMII::AddrModeMask) {
        case ARMII::AddrMode3:
        case ARMII::AddrModeT2_i8:
          // 8-bit immediate offset.
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode5:
        case ARMII::AddrModeT2_i8s4:
          // 8-bit immediate, scaled by 4.
          Limit = std::min(Limit, ((1U << 8) - 1) * 4);
          break;
        case ARMII::AddrModeT2_i12:
          // i12 supports only positive offset so these will be converted to
          // i8 opcodes. See llvm::rewriteT2FrameIndex.
          if (hasFP(MF) && AFI->hasStackFrame())
            Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode6:
          // Addressing mode 6 (load/store) instructions can't encode an
          // immediate offset for stack references.
          return 0;
        default:
          break;
        }
        break; // At most one FI per instruction
      }
    }
  }

  return Limit;
}
763
764static unsigned GetFunctionSizeInBytes(const MachineFunction &MF,
765                                       const ARMBaseInstrInfo &TII) {
766  unsigned FnSize = 0;
767  for (MachineFunction::const_iterator MBBI = MF.begin(), E = MF.end();
768       MBBI != E; ++MBBI) {
769    const MachineBasicBlock &MBB = *MBBI;
770    for (MachineBasicBlock::const_iterator I = MBB.begin(),E = MBB.end();
771         I != E; ++I)
772      FnSize += TII.GetInstSizeInBytes(I);
773  }
774  return FnSize;
775}
776
777void
778ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
779                                                       RegScavenger *RS) const {
780  // This tells PEI to spill the FP as if it is any other callee-save register
781  // to take advantage the eliminateFrameIndex machinery. This also ensures it
782  // is spilled in the order specified by getCalleeSavedRegs() to make it easier
783  // to combine multiple loads / stores.
784  bool CanEliminateFrame = true;
785  bool CS1Spilled = false;
786  bool LRSpilled = false;
787  unsigned NumGPRSpills = 0;
788  SmallVector<unsigned, 4> UnspilledCS1GPRs;
789  SmallVector<unsigned, 4> UnspilledCS2GPRs;
790  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
791  MachineFrameInfo *MFI = MF.getFrameInfo();
792
793  // Spill R4 if Thumb2 function requires stack realignment - it will be used as
794  // scratch register.
795  // FIXME: It will be better just to find spare register here.
796  if (needsStackRealignment(MF) &&
797      AFI->isThumb2Function())
798    MF.getRegInfo().setPhysRegUsed(ARM::R4);
799
800  // Spill LR if Thumb1 function uses variable length argument lists.
801  if (AFI->isThumb1OnlyFunction() && AFI->getVarArgsRegSaveSize() > 0)
802    MF.getRegInfo().setPhysRegUsed(ARM::LR);
803
804  // Spill the BasePtr if it's used.
805  if (hasBasePointer(MF))
806    MF.getRegInfo().setPhysRegUsed(BasePtr);
807
808  // Don't spill FP if the frame can be eliminated. This is determined
809  // by scanning the callee-save registers to see if any is used.
810  const unsigned *CSRegs = getCalleeSavedRegs();
811  for (unsigned i = 0; CSRegs[i]; ++i) {
812    unsigned Reg = CSRegs[i];
813    bool Spilled = false;
814    if (MF.getRegInfo().isPhysRegUsed(Reg)) {
815      AFI->setCSRegisterIsSpilled(Reg);
816      Spilled = true;
817      CanEliminateFrame = false;
818    } else {
819      // Check alias registers too.
820      for (const unsigned *Aliases = getAliasSet(Reg); *Aliases; ++Aliases) {
821        if (MF.getRegInfo().isPhysRegUsed(*Aliases)) {
822          Spilled = true;
823          CanEliminateFrame = false;
824        }
825      }
826    }
827
828    if (!ARM::GPRRegisterClass->contains(Reg))
829      continue;
830
831    if (Spilled) {
832      NumGPRSpills++;
833
834      if (!STI.isTargetDarwin()) {
835        if (Reg == ARM::LR)
836          LRSpilled = true;
837        CS1Spilled = true;
838        continue;
839      }
840
841      // Keep track if LR and any of R4, R5, R6, and R7 is spilled.
842      switch (Reg) {
843      case ARM::LR:
844        LRSpilled = true;
845        // Fallthrough
846      case ARM::R4:
847      case ARM::R5:
848      case ARM::R6:
849      case ARM::R7:
850        CS1Spilled = true;
851        break;
852      default:
853        break;
854      }
855    } else {
856      if (!STI.isTargetDarwin()) {
857        UnspilledCS1GPRs.push_back(Reg);
858        continue;
859      }
860
861      switch (Reg) {
862      case ARM::R4:
863      case ARM::R5:
864      case ARM::R6:
865      case ARM::R7:
866      case ARM::LR:
867        UnspilledCS1GPRs.push_back(Reg);
868        break;
869      default:
870        UnspilledCS2GPRs.push_back(Reg);
871        break;
872      }
873    }
874  }
875
876  bool ForceLRSpill = false;
877  if (!LRSpilled && AFI->isThumb1OnlyFunction()) {
878    unsigned FnSize = GetFunctionSizeInBytes(MF, TII);
879    // Force LR to be spilled if the Thumb function size is > 2048. This enables
880    // use of BL to implement far jump. If it turns out that it's not needed
881    // then the branch fix up path will undo it.
882    if (FnSize >= (1 << 11)) {
883      CanEliminateFrame = false;
884      ForceLRSpill = true;
885    }
886  }
887
888  // If any of the stack slot references may be out of range of an immediate
889  // offset, make sure a register (or a spill slot) is available for the
890  // register scavenger. Note that if we're indexing off the frame pointer, the
891  // effective stack size is 4 bytes larger since the FP points to the stack
892  // slot of the previous FP. Also, if we have variable sized objects in the
893  // function, stack slot references will often be negative, and some of
894  // our instructions are positive-offset only, so conservatively consider
895  // that case to want a spill slot (or register) as well. Similarly, if
896  // the function adjusts the stack pointer during execution and the
897  // adjustments aren't already part of our stack size estimate, our offset
898  // calculations may be off, so be conservative.
899  // FIXME: We could add logic to be more precise about negative offsets
900  //        and which instructions will need a scratch register for them. Is it
901  //        worth the effort and added fragility?
902  bool BigStack =
903    (RS &&
904     (estimateStackSize(MF) + ((hasFP(MF) && AFI->hasStackFrame()) ? 4:0) >=
905      estimateRSStackSizeLimit(MF)))
906    || MFI->hasVarSizedObjects()
907    || (MFI->adjustsStack() && !canSimplifyCallFramePseudos(MF));
908
909  bool ExtraCSSpill = false;
910  if (BigStack || !CanEliminateFrame || cannotEliminateFrame(MF)) {
911    AFI->setHasStackFrame(true);
912
913    // If LR is not spilled, but at least one of R4, R5, R6, and R7 is spilled.
914    // Spill LR as well so we can fold BX_RET to the registers restore (LDM).
915    if (!LRSpilled && CS1Spilled) {
916      MF.getRegInfo().setPhysRegUsed(ARM::LR);
917      AFI->setCSRegisterIsSpilled(ARM::LR);
918      NumGPRSpills++;
919      UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(),
920                                    UnspilledCS1GPRs.end(), (unsigned)ARM::LR));
921      ForceLRSpill = false;
922      ExtraCSSpill = true;
923    }
924
925    if (hasFP(MF)) {
926      MF.getRegInfo().setPhysRegUsed(FramePtr);
927      NumGPRSpills++;
928    }
929
930    // If stack and double are 8-byte aligned and we are spilling an odd number
931    // of GPRs. Spill one extra callee save GPR so we won't have to pad between
932    // the integer and double callee save areas.
933    unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
934    if (TargetAlign == 8 && (NumGPRSpills & 1)) {
935      if (CS1Spilled && !UnspilledCS1GPRs.empty()) {
936        for (unsigned i = 0, e = UnspilledCS1GPRs.size(); i != e; ++i) {
937          unsigned Reg = UnspilledCS1GPRs[i];
938          // Don't spill high register if the function is thumb1
939          if (!AFI->isThumb1OnlyFunction() ||
940              isARMLowRegister(Reg) || Reg == ARM::LR) {
941            MF.getRegInfo().setPhysRegUsed(Reg);
942            AFI->setCSRegisterIsSpilled(Reg);
943            if (!isReservedReg(MF, Reg))
944              ExtraCSSpill = true;
945            break;
946          }
947        }
948      } else if (!UnspilledCS2GPRs.empty() &&
949                 !AFI->isThumb1OnlyFunction()) {
950        unsigned Reg = UnspilledCS2GPRs.front();
951        MF.getRegInfo().setPhysRegUsed(Reg);
952        AFI->setCSRegisterIsSpilled(Reg);
953        if (!isReservedReg(MF, Reg))
954          ExtraCSSpill = true;
955      }
956    }
957
958    // Estimate if we might need to scavenge a register at some point in order
959    // to materialize a stack offset. If so, either spill one additional
960    // callee-saved register or reserve a special spill slot to facilitate
961    // register scavenging. Thumb1 needs a spill slot for stack pointer
962    // adjustments also, even when the frame itself is small.
963    if (BigStack && !ExtraCSSpill) {
964      // If any non-reserved CS register isn't spilled, just spill one or two
965      // extra. That should take care of it!
966      unsigned NumExtras = TargetAlign / 4;
967      SmallVector<unsigned, 2> Extras;
968      while (NumExtras && !UnspilledCS1GPRs.empty()) {
969        unsigned Reg = UnspilledCS1GPRs.back();
970        UnspilledCS1GPRs.pop_back();
971        if (!isReservedReg(MF, Reg) &&
972            (!AFI->isThumb1OnlyFunction() || isARMLowRegister(Reg) ||
973             Reg == ARM::LR)) {
974          Extras.push_back(Reg);
975          NumExtras--;
976        }
977      }
978      // For non-Thumb1 functions, also check for hi-reg CS registers
979      if (!AFI->isThumb1OnlyFunction()) {
980        while (NumExtras && !UnspilledCS2GPRs.empty()) {
981          unsigned Reg = UnspilledCS2GPRs.back();
982          UnspilledCS2GPRs.pop_back();
983          if (!isReservedReg(MF, Reg)) {
984            Extras.push_back(Reg);
985            NumExtras--;
986          }
987        }
988      }
989      if (Extras.size() && NumExtras == 0) {
990        for (unsigned i = 0, e = Extras.size(); i != e; ++i) {
991          MF.getRegInfo().setPhysRegUsed(Extras[i]);
992          AFI->setCSRegisterIsSpilled(Extras[i]);
993        }
994      } else if (!AFI->isThumb1OnlyFunction()) {
995        // note: Thumb1 functions spill to R12, not the stack.  Reserve a slot
996        // closest to SP or frame pointer.
997        const TargetRegisterClass *RC = ARM::GPRRegisterClass;
998        RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
999                                                           RC->getAlignment(),
1000                                                           false));
1001      }
1002    }
1003  }
1004
1005  if (ForceLRSpill) {
1006    MF.getRegInfo().setPhysRegUsed(ARM::LR);
1007    AFI->setCSRegisterIsSpilled(ARM::LR);
1008    AFI->setLRIsSpilledForFarJump(true);
1009  }
1010}
1011
// Return the register holding the return address: LR on ARM.
unsigned ARMBaseRegisterInfo::getRARegister() const {
  return ARM::LR;
}
1015
1016unsigned
1017ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
1018  if (hasFP(MF))
1019    return FramePtr;
1020  return ARM::SP;
1021}
1022
// Provide a base+offset reference to an FI slot for debug info. It's the
// same as what we use for resolving the code-gen references for now.
// FIXME: This can go wrong when references are SP-relative and simple call
//        frames aren't used.
int
ARMBaseRegisterInfo::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  // Delegate with a zero SP adjustment; FrameReg receives the register the
  // returned offset is relative to.
  return ResolveFrameIndexReference(MF, FI, FrameReg, 0);
}
1032
/// ResolveFrameIndexReference - Compute the offset to use for frame index
/// FI and set FrameReg to the register (SP, FP, or the base pointer) that
/// the offset is relative to. SPAdj is any extra SP adjustment in effect
/// at the point of use.
int
ARMBaseRegisterInfo::ResolveFrameIndexReference(const MachineFunction &MF,
                                                int FI,
                                                unsigned &FrameReg,
                                                int SPAdj) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // Offset is the slot's position relative to the incoming SP; FPOffset is
  // the same slot expressed relative to where FP was spilled.
  int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
  int FPOffset = Offset - AFI->getFramePtrSpillOffset();
  bool isFixed = MFI->isFixedObjectIndex(FI);

  FrameReg = ARM::SP;
  Offset += SPAdj;
  // Slots inside the callee-saved spill areas are addressed relative to the
  // start of their own area.
  if (AFI->isGPRCalleeSavedArea1Frame(FI))
    return Offset - AFI->getGPRCalleeSavedArea1Offset();
  else if (AFI->isGPRCalleeSavedArea2Frame(FI))
    return Offset - AFI->getGPRCalleeSavedArea2Offset();
  else if (AFI->isDPRCalleeSavedAreaFrame(FI))
    return Offset - AFI->getDPRCalleeSavedAreaOffset();

  // When dynamically realigning the stack, use the frame pointer for
  // parameters, and the stack/base pointer for locals.
  if (needsStackRealignment(MF)) {
    assert (hasFP(MF) && "dynamic stack realignment without a FP!");
    if (isFixed) {
      FrameReg = getFrameRegister(MF);
      Offset = FPOffset;
    } else if (MFI->hasVarSizedObjects())
      // SP is unreliable with VLAs; locals go through the base pointer
      // (Offset stays SP-relative, which is what BasePtr tracks).
      FrameReg = BasePtr;
    return Offset;
  }

  // If there is a frame pointer, use it when we can.
  if (hasFP(MF) && AFI->hasStackFrame()) {
    // Use frame pointer to reference fixed objects. Use it for locals if
    // there are VLAs (and thus the SP isn't reliable as a base).
    if (isFixed || (MFI->hasVarSizedObjects() && !hasBasePointer(MF))) {
      FrameReg = getFrameRegister(MF);
      Offset = FPOffset;
    } else if (MFI->hasVarSizedObjects()) {
      assert(hasBasePointer(MF) && "missing base pointer!");
      // Use the base register since we have it.
      FrameReg = BasePtr;
    } else if (AFI->isThumb2Function()) {
      // In Thumb2 mode, the negative offset is very limited. Try to avoid
      // out of range references.
      if (FPOffset >= -255 && FPOffset < 0) {
        FrameReg = getFrameRegister(MF);
        Offset = FPOffset;
      }
    } else if (Offset > (FPOffset < 0 ? -FPOffset : FPOffset)) {
      // Otherwise, use SP or FP, whichever is closer to the stack slot.
      FrameReg = getFrameRegister(MF);
      Offset = FPOffset;
    }
  }
  // Use the base pointer if we have one.
  if (hasBasePointer(MF))
    FrameReg = BasePtr;
  return Offset;
}
1094
1095int
1096ARMBaseRegisterInfo::getFrameIndexOffset(const MachineFunction &MF,
1097                                         int FI) const {
1098  unsigned FrameReg;
1099  return getFrameIndexReference(MF, FI, FrameReg);
1100}
1101
// Not implemented for ARM; aborts if ever called. The return is only to
// satisfy the signature.
unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}
1106
// Not implemented for ARM; aborts if ever called. The return is only to
// satisfy the signature.
unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}
1111
// Map a register to its DWARF number. The isEH flag is ignored: flavour 0
// of the tablegen'd mapping is used for both debug and EH info.
int ARMBaseRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
  return ARMGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
}
1115
/// getRegisterPairEven - Given a register, return the even-numbered register
/// it would pair with (e.g. R1 -> R0), or 0 when no legal pairing exists
/// (unlisted registers, or pairs involving a reserved register).
unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
                                              const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R1:
    return ARM::R0;
  case ARM::R3:
    return ARM::R2;
  case ARM::R5:
    return ARM::R4;
  case ARM::R7:
    // R7 may be the frame pointer; R6 may be reserved as a base pointer.
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R6;
  case ARM::R9:
    return isReservedReg(MF, ARM::R9)  ? 0 :ARM::R8;
  case ARM::R11:
    return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;

  // VFP single-precision registers pair unconditionally.
  case ARM::S1:
    return ARM::S0;
  case ARM::S3:
    return ARM::S2;
  case ARM::S5:
    return ARM::S4;
  case ARM::S7:
    return ARM::S6;
  case ARM::S9:
    return ARM::S8;
  case ARM::S11:
    return ARM::S10;
  case ARM::S13:
    return ARM::S12;
  case ARM::S15:
    return ARM::S14;
  case ARM::S17:
    return ARM::S16;
  case ARM::S19:
    return ARM::S18;
  case ARM::S21:
    return ARM::S20;
  case ARM::S23:
    return ARM::S22;
  case ARM::S25:
    return ARM::S24;
  case ARM::S27:
    return ARM::S26;
  case ARM::S29:
    return ARM::S28;
  case ARM::S31:
    return ARM::S30;

  // VFP double-precision registers pair unconditionally.
  case ARM::D1:
    return ARM::D0;
  case ARM::D3:
    return ARM::D2;
  case ARM::D5:
    return ARM::D4;
  case ARM::D7:
    return ARM::D6;
  case ARM::D9:
    return ARM::D8;
  case ARM::D11:
    return ARM::D10;
  case ARM::D13:
    return ARM::D12;
  case ARM::D15:
    return ARM::D14;
  case ARM::D17:
    return ARM::D16;
  case ARM::D19:
    return ARM::D18;
  case ARM::D21:
    return ARM::D20;
  case ARM::D23:
    return ARM::D22;
  case ARM::D25:
    return ARM::D24;
  case ARM::D27:
    return ARM::D26;
  case ARM::D29:
    return ARM::D28;
  case ARM::D31:
    return ARM::D30;
  }

  return 0;
}
1205
/// getRegisterPairOdd - Given a register, return the odd-numbered register
/// it would pair with (e.g. R0 -> R1), or 0 when no legal pairing exists
/// (unlisted registers, or pairs involving a reserved register).
unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
                                             const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R0:
    return ARM::R1;
  case ARM::R2:
    return ARM::R3;
  case ARM::R4:
    return ARM::R5;
  case ARM::R6:
    // R7 may be the frame pointer; R6 may be reserved as a base pointer.
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R7;
  case ARM::R8:
    return isReservedReg(MF, ARM::R9)  ? 0 :ARM::R9;
  case ARM::R10:
    return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;

  // VFP single-precision registers pair unconditionally.
  case ARM::S0:
    return ARM::S1;
  case ARM::S2:
    return ARM::S3;
  case ARM::S4:
    return ARM::S5;
  case ARM::S6:
    return ARM::S7;
  case ARM::S8:
    return ARM::S9;
  case ARM::S10:
    return ARM::S11;
  case ARM::S12:
    return ARM::S13;
  case ARM::S14:
    return ARM::S15;
  case ARM::S16:
    return ARM::S17;
  case ARM::S18:
    return ARM::S19;
  case ARM::S20:
    return ARM::S21;
  case ARM::S22:
    return ARM::S23;
  case ARM::S24:
    return ARM::S25;
  case ARM::S26:
    return ARM::S27;
  case ARM::S28:
    return ARM::S29;
  case ARM::S30:
    return ARM::S31;

  // VFP double-precision registers pair unconditionally.
  case ARM::D0:
    return ARM::D1;
  case ARM::D2:
    return ARM::D3;
  case ARM::D4:
    return ARM::D5;
  case ARM::D6:
    return ARM::D7;
  case ARM::D8:
    return ARM::D9;
  case ARM::D10:
    return ARM::D11;
  case ARM::D12:
    return ARM::D13;
  case ARM::D14:
    return ARM::D15;
  case ARM::D16:
    return ARM::D17;
  case ARM::D18:
    return ARM::D19;
  case ARM::D20:
    return ARM::D21;
  case ARM::D22:
    return ARM::D23;
  case ARM::D24:
    return ARM::D25;
  case ARM::D26:
    return ARM::D27;
  case ARM::D28:
    return ARM::D29;
  case ARM::D30:
    return ARM::D31;
  }

  return 0;
}
1295
1296/// emitLoadConstPool - Emits a load from constpool to materialize the
1297/// specified immediate.
1298void ARMBaseRegisterInfo::
1299emitLoadConstPool(MachineBasicBlock &MBB,
1300                  MachineBasicBlock::iterator &MBBI,
1301                  DebugLoc dl,
1302                  unsigned DestReg, unsigned SubIdx, int Val,
1303                  ARMCC::CondCodes Pred,
1304                  unsigned PredReg) const {
1305  MachineFunction &MF = *MBB.getParent();
1306  MachineConstantPool *ConstantPool = MF.getConstantPool();
1307  const Constant *C =
1308        ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
1309  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
1310
1311  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
1312    .addReg(DestReg, getDefRegState(true), SubIdx)
1313    .addConstantPoolIndex(Idx)
1314    .addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
1315}
1316
// ARM always sets up a register scavenger; frame index elimination may need
// a scratch register to materialize large stack offsets.
bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}
1321
// Scavenging during frame index elimination is always enabled on ARM.
bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}
1326
// Virtual base register allocation is controlled by the
// -enable-local-stack-alloc command line flag (defaults to on).
bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return EnableLocalStackAlloc;
}
1331
1332// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
1333// not required, we reserve argument space for call sites in the function
1334// immediately on entry to the current function. This eliminates the need for
1335// add/sub sp brackets around call sites. Returns true if the call frame is
1336// included as part of the stack frame.
1337bool ARMBaseRegisterInfo::
1338hasReservedCallFrame(const MachineFunction &MF) const {
1339  const MachineFrameInfo *FFI = MF.getFrameInfo();
1340  unsigned CFSize = FFI->getMaxCallFrameSize();
1341  // It's not always a good idea to include the call frame as part of the
1342  // stack frame. ARM (especially Thumb) has small immediate offset to
1343  // address the stack frame. So a large call frame can cause poor codegen
1344  // and may even makes it impossible to scavenge a register.
1345  if (CFSize >= ((1 << 12) - 1) / 2)  // Half of imm12
1346    return false;
1347
1348  return !MF.getFrameInfo()->hasVarSizedObjects();
1349}
1350
1351// canSimplifyCallFramePseudos - If there is a reserved call frame, the
1352// call frame pseudos can be simplified. Unlike most targets, having a FP
1353// is not sufficient here since we still may reference some objects via SP
1354// even when FP is available in Thumb2 mode.
1355bool ARMBaseRegisterInfo::
1356canSimplifyCallFramePseudos(const MachineFunction &MF) const {
1357  return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
1358}
1359
1360static void
1361emitSPUpdate(bool isARM,
1362             MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
1363             DebugLoc dl, const ARMBaseInstrInfo &TII,
1364             int NumBytes,
1365             ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
1366  if (isARM)
1367    emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
1368                            Pred, PredReg, TII);
1369  else
1370    emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
1371                           Pred, PredReg, TII);
1372}
1373
1374
// Lower ADJCALLSTACKDOWN/ADJCALLSTACKUP pseudos. With a reserved call frame
// the adjustment is already folded into the function's frame, so the pseudo
// is simply erased; otherwise it is expanded to an explicit SP update.
void ARMBaseRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If we have alloca, convert as follows:
    // ADJCALLSTACKDOWN -> sub, sp, sp, amount
    // ADJCALLSTACKUP   -> add, sp, sp, amount
    MachineInstr *Old = I;
    DebugLoc dl = Old->getDebugLoc();
    unsigned Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly.  To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;

      ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
      assert(!AFI->isThumb1OnlyFunction() &&
             "This eliminateCallFramePseudoInstr does not support Thumb1!");
      bool isARM = !AFI->isThumbFunction();

      // Replace the pseudo instruction with a new instruction...
      unsigned Opc = Old->getOpcode();
      int PIdx = Old->findFirstPredOperandIdx();
      // Preserve the predicate of the pseudo (AL if unpredicated).
      ARMCC::CondCodes Pred = (PIdx == -1)
        ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
      if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
        // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
        unsigned PredReg = Old->getOperand(2).getReg();
        emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
      } else {
        // Note: PredReg is operand 3 for ADJCALLSTACKUP.
        unsigned PredReg = Old->getOperand(3).getReg();
        assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
        emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
      }
    }
  }
  MBB.erase(I);
}
1416
1417int64_t ARMBaseRegisterInfo::
1418getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
1419  const TargetInstrDesc &Desc = MI->getDesc();
1420  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
1421  int64_t InstrOffs = 0;;
1422  int Scale = 1;
1423  unsigned ImmIdx = 0;
1424  switch (AddrMode) {
1425  case ARMII::AddrModeT2_i8:
1426  case ARMII::AddrModeT2_i12:
1427    // i8 supports only negative, and i12 supports only positive, so
1428    // based on Offset sign, consider the appropriate instruction
1429    InstrOffs = MI->getOperand(Idx+1).getImm();
1430    Scale = 1;
1431    break;
1432  case ARMII::AddrMode5: {
1433    // VFP address mode.
1434    const MachineOperand &OffOp = MI->getOperand(Idx+1);
1435    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
1436    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
1437      InstrOffs = -InstrOffs;
1438    Scale = 4;
1439    break;
1440  }
1441  case ARMII::AddrMode2: {
1442    ImmIdx = Idx+2;
1443    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
1444    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1445      InstrOffs = -InstrOffs;
1446    break;
1447  }
1448  case ARMII::AddrMode3: {
1449    ImmIdx = Idx+2;
1450    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
1451    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1452      InstrOffs = -InstrOffs;
1453    break;
1454  }
1455  case ARMII::AddrModeT1_s: {
1456    ImmIdx = Idx+1;
1457    InstrOffs = MI->getOperand(ImmIdx).getImm();
1458    Scale = 4;
1459    break;
1460  }
1461  default:
1462    llvm_unreachable("Unsupported addressing mode!");
1463    break;
1464  }
1465
1466  return InstrOffs * Scale;
1467}
1468
/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  // Sanity check: the instruction must actually carry a frame index operand.
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDR: case ARM::LDRH: case ARM::LDRB:
  case ARM::STR: case ARM::STRH: case ARM::STRB:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    // -arm-force-base-reg-alloc skips the heuristics entirely.
    if (ForceAllBaseRegAlloc)
      return true;
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer,
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
  // NOTE(review): this condition looks equivalent to
  // !AFI->isThumb1OnlyFunction() (assuming Thumb1-only implies Thumb);
  // confirm before simplifying.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relating to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset = -Offset;
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
  if (hasFP(MF) &&
      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, Offset))
    return false;

  // The offset likely isn't legal, we want to allocate a virtual base register.
  return true;
}
1553
1554/// materializeFrameBaseRegister - Insert defining instruction(s) for
1555/// BaseReg to be a pointer to FrameIdx before insertion point I.
1556void ARMBaseRegisterInfo::
1557materializeFrameBaseRegister(MachineBasicBlock::iterator I, unsigned BaseReg,
1558                             int FrameIdx, int64_t Offset) const {
1559  ARMFunctionInfo *AFI =
1560    I->getParent()->getParent()->getInfo<ARMFunctionInfo>();
1561  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
1562    (AFI->isThumb1OnlyFunction() ? ARM::tADDrSPi : ARM::t2ADDri);
1563
1564  MachineInstrBuilder MIB =
1565    BuildMI(*I->getParent(), I, I->getDebugLoc(), TII.get(ADDriOpc), BaseReg)
1566    .addFrameIndex(FrameIdx).addImm(Offset);
1567  if (!AFI->isThumb1OnlyFunction())
1568    AddDefaultCC(AddDefaultPred(MIB));
1569}
1570
1571void
1572ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
1573                                       unsigned BaseReg, int64_t Offset) const {
1574  MachineInstr &MI = *I;
1575  MachineBasicBlock &MBB = *MI.getParent();
1576  MachineFunction &MF = *MBB.getParent();
1577  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1578  int Off = Offset; // ARM doesn't need the general 64-bit offsets
1579  unsigned i = 0;
1580
1581  assert(!AFI->isThumb1OnlyFunction() &&
1582         "This resolveFrameIndex does not support Thumb1!");
1583
1584  while (!MI.getOperand(i).isFI()) {
1585    ++i;
1586    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
1587  }
1588  bool Done = false;
1589  if (!AFI->isThumbFunction())
1590    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
1591  else {
1592    assert(AFI->isThumb2Function());
1593    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
1594  }
1595  assert (Done && "Unable to resolve frame index!");
1596}
1597
/// isFrameOffsetLegal - Return true if Offset (combined with the immediate
/// already encoded in MI's operands) can be encoded in MI's addressing mode.
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             int64_t Offset) const {
  const TargetInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  // Locate the frame-index operand; the immediate operand(s) follow it.
  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  // NumBits/Scale/isSigned describe the encodable range: legal offsets are
  // multiples of Scale with magnitude up to ((1 << NumBits) - 1) * Scale.
  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    // Thumb1 SP-relative: unsigned 5-bit immediate scaled by 4.
    NumBits = 5;
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
    break;
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}
1665
// Replace the frame-index operand of *II with a concrete base register and
// offset; if the offset cannot be folded into the instruction, materialize
// base+offset into a scratch virtual register instead.
void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, RegScavenger *RS) const {
  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");

  // Locate the frame-index operand.
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned FrameReg;

  int Offset = ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(i).  ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(i+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  // Carry the instruction's predicate over to the materializing sequence.
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
  else {
    // Materialize FrameReg + Offset into a fresh virtual register and use
    // it as the base (register scavenging assigns it a physical register).
    ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
  }
}
1734
1735/// Move iterator past the next bunch of callee save load / store ops for
1736/// the particular spill area (1: integer area 1, 2: integer area 2,
1737/// 3: fp area, 0: don't care).
1738static void movePastCSLoadStoreOps(MachineBasicBlock &MBB,
1739                                   MachineBasicBlock::iterator &MBBI,
1740                                   int Opc1, int Opc2, unsigned Area,
1741                                   const ARMSubtarget &STI) {
1742  while (MBBI != MBB.end() &&
1743         ((MBBI->getOpcode() == Opc1) || (MBBI->getOpcode() == Opc2)) &&
1744         MBBI->getOperand(1).isFI()) {
1745    if (Area != 0) {
1746      bool Done = false;
1747      unsigned Category = 0;
1748      switch (MBBI->getOperand(0).getReg()) {
1749      case ARM::R4:  case ARM::R5:  case ARM::R6: case ARM::R7:
1750      case ARM::LR:
1751        Category = 1;
1752        break;
1753      case ARM::R8:  case ARM::R9:  case ARM::R10: case ARM::R11:
1754        Category = STI.isTargetDarwin() ? 2 : 1;
1755        break;
1756      case ARM::D8:  case ARM::D9:  case ARM::D10: case ARM::D11:
1757      case ARM::D12: case ARM::D13: case ARM::D14: case ARM::D15:
1758        Category = 3;
1759        break;
1760      default:
1761        Done = true;
1762        break;
1763      }
1764      if (Done || Category != Area)
1765        break;
1766    }
1767
1768    ++MBBI;
1769  }
1770}
1771
/// emitPrologue - Insert prologue code into the function's entry block.
/// In order: allocate the vararg register save area, adjust SP for the
/// callee-save GPR spill area 1 (and, on Darwin, area 2), set up the frame
/// pointer, adjust SP for the DPR (fp) spill area, allocate the local stack,
/// realign the stack dynamically if required, and establish the base pointer.
/// ARM and Thumb2 only; Thumb1 has a separate implementation.
void ARMBaseRegisterInfo::
emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo  *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This emitPrologue does not support Thumb1!");
  bool isARM = !AFI->isThumbFunction();
  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  unsigned NumBytes = MFI->getStackSize();
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  // Use the first instruction's debug location (if any) for all prologue code.
  DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Determine the sizes of each callee-save spill areas and record which frame
  // belongs to which callee-save spill areas.
  unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
  int FramePtrSpillFI = 0;

  // Allocate the vararg register save area. This is not counted in NumBytes.
  if (VARegSaveSize)
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, -VARegSaveSize);

  // No callee-save spills: just allocate the locals (if any) and return.
  if (!AFI->hasStackFrame()) {
    if (NumBytes != 0)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
    return;
  }

  // Classify each callee-saved spill slot into one of the three areas:
  // area 1: R4-R7/LR (plus R8-R11 on non-Darwin), area 2: R8-R11 (Darwin
  // only), fp area: everything else (D registers, 8 bytes each).  Also
  // remember the frame index where the frame pointer register is spilled.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    int FI = CSI[i].getFrameIdx();
    switch (Reg) {
    case ARM::R4:
    case ARM::R5:
    case ARM::R6:
    case ARM::R7:
    case ARM::LR:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      AFI->addGPRCalleeSavedArea1Frame(FI);
      GPRCS1Size += 4;
      break;
    case ARM::R8:
    case ARM::R9:
    case ARM::R10:
    case ARM::R11:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      if (STI.isTargetDarwin()) {
        AFI->addGPRCalleeSavedArea2Frame(FI);
        GPRCS2Size += 4;
      } else {
        AFI->addGPRCalleeSavedArea1Frame(FI);
        GPRCS1Size += 4;
      }
      break;
    default:
      AFI->addDPRCalleeSavedAreaFrame(FI);
      DPRCSSize += 8;
    }
  }

  // Build the new SUBri to adjust SP for integer callee-save spill area 1.
  // Then advance MBBI past the area-1 stores so the following instructions
  // are inserted after them.
  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS1Size);
  movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 1, STI);

  // Set FP to point to the stack slot that contains the previous FP.
  // For Darwin, FP is R7, which has now been stored in spill area 1.
  // Otherwise, if this is not Darwin, all the callee-saved registers go
  // into spill area 1, including the FP in R11.  In either case, it is
  // now safe to emit this assignment.
  bool HasFP = hasFP(MF);
  if (HasFP) {
    unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri : ARM::t2ADDri;
    MachineInstrBuilder MIB =
      BuildMI(MBB, MBBI, dl, TII.get(ADDriOpc), FramePtr)
      .addFrameIndex(FramePtrSpillFI).addImm(0);
    AddDefaultCC(AddDefaultPred(MIB));
  }

  // Build the new SUBri to adjust SP for integer callee-save spill area 2.
  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS2Size);

  // Build the new SUBri to adjust SP for FP callee-save spill area.
  movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 2, STI);
  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRCSSize);

  // Determine starting offsets of spill areas (measured up from the lowest
  // address of the local area; the areas sit above the locals, in order
  // DPR, then GPR area 2, then GPR area 1).
  unsigned DPRCSOffset  = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
  unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
  unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
  if (HasFP)
    AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) +
                                NumBytes);
  AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
  AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
  AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);

  // Skip past the DPR stores, then allocate the remaining local stack.
  movePastCSLoadStoreOps(MBB, MBBI, ARM::VSTRD, 0, 3, STI);
  NumBytes = DPRCSOffset;
  if (NumBytes) {
    // Adjust SP after all the callee-save spills.
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
    if (HasFP)
      AFI->setShouldRestoreSPFromFP(true);
  }

  if (STI.isTargetELF() && hasFP(MF)) {
    MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
                             AFI->getFramePtrSpillOffset());
    AFI->setShouldRestoreSPFromFP(true);
  }

  // Record the final area sizes so the epilogue can undo them.
  AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
  AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
  AFI->setDPRCalleeSavedAreaSize(DPRCSSize);

  // If we need dynamic stack realignment, do it here.
  if (needsStackRealignment(MF)) {
    unsigned MaxAlign = MFI->getMaxAlignment();
    assert (!AFI->isThumb1OnlyFunction());
    if (!AFI->isThumbFunction()) {
      // Emit bic sp, sp, MaxAlign
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
                                          TII.get(ARM::BICri), ARM::SP)
                                  .addReg(ARM::SP, RegState::Kill)
                                  .addImm(MaxAlign-1)));
    } else {
      // We cannot use sp as source/dest register here, thus we're emitting the
      // following sequence:
      // mov r4, sp
      // bic r4, r4, MaxAlign
      // mov sp, r4
      // FIXME: It will be better just to find spare register here.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2tgpr), ARM::R4)
        .addReg(ARM::SP, RegState::Kill);
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
                                          TII.get(ARM::t2BICri), ARM::R4)
                                  .addReg(ARM::R4, RegState::Kill)
                                  .addImm(MaxAlign-1)));
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
        .addReg(ARM::R4, RegState::Kill);
    }

    // SP is no longer a known offset from the frame base; restore it from FP.
    AFI->setShouldRestoreSPFromFP(true);
  }

  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  if (hasBasePointer(MF)) {
    if (isARM)
      BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), BasePtr)
        .addReg(ARM::SP)
        .addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
    else
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), BasePtr)
        .addReg(ARM::SP);
  }

  // If the frame has variable sized objects then the epilogue must restore
  // the sp from fp.
  if (!AFI->shouldRestoreSPFromFP() && MFI->hasVarSizedObjects())
    AFI->setShouldRestoreSPFromFP(true);
}
1939
/// Return true if \p Reg appears in the zero-terminated register list
/// \p CSRegs (the target's callee-saved register array).
static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
  for (const unsigned *R = CSRegs; *R; ++R)
    if (*R == Reg)
      return true;
  return false;
}
1946
1947static bool isCSRestore(MachineInstr *MI,
1948                        const ARMBaseInstrInfo &TII,
1949                        const unsigned *CSRegs) {
1950  return ((MI->getOpcode() == (int)ARM::VLDRD ||
1951           MI->getOpcode() == (int)ARM::LDR ||
1952           MI->getOpcode() == (int)ARM::t2LDRi12) &&
1953          MI->getOperand(1).isFI() &&
1954          isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs));
1955}
1956
/// emitEpilogue - Insert epilogue code before the return instruction of
/// \p MBB.  Deallocates the local stack (restoring SP from FP when the
/// prologue marked that necessary), re-adjusts SP past each callee-save
/// spill area while stepping over the restore loads, lowers TCRETURN
/// pseudos into the matching tail-call jumps, and frees the vararg
/// register save area.  ARM and Thumb2 only.
void ARMBaseRegisterInfo::
emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  assert(MBBI->getDesc().isReturn() &&
         "Can only insert epilog into returning blocks");
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc dl = MBBI->getDebugLoc();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This emitEpilogue does not support Thumb1!");
  bool isARM = !AFI->isThumbFunction();

  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  int NumBytes = (int)MFI->getStackSize();

  if (!AFI->hasStackFrame()) {
    // No callee-save spills: just free the locals, if any.
    if (NumBytes != 0)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
  } else {
    // Unwind MBBI to point to first LDR / VLDRD, so the SP adjustments
    // below are inserted before the callee-save restore sequence.
    const unsigned *CSRegs = getCalleeSavedRegs();
    if (MBBI != MBB.begin()) {
      do
        --MBBI;
      while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
      // The loop overshoots by one when it stops on a non-restore; step back.
      if (!isCSRestore(MBBI, TII, CSRegs))
        ++MBBI;
    }

    // Move SP to start of FP callee save spill area.
    NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
                 AFI->getGPRCalleeSavedArea2Size() +
                 AFI->getDPRCalleeSavedAreaSize());

    // Reset SP based on frame pointer only if the stack frame extends beyond
    // frame pointer stack slot or target is ELF and the function has FP.
    if (AFI->shouldRestoreSPFromFP()) {
      // Distance from FP down to the bottom of the FP spill area.
      NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
      if (NumBytes) {
        if (isARM)
          emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
                                  ARMCC::AL, 0, TII);
        else
          emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
                                 ARMCC::AL, 0, TII);
      } else {
        // Zero offset: a plain SP := FP copy suffices. Thumb2 or ARM.
        if (isARM)
          BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
            .addReg(FramePtr).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
        else
          BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP)
            .addReg(FramePtr);
      }
    } else if (NumBytes)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);

    // Move SP to start of integer callee save spill area 2: step over the
    // DPR restores, then pop the DPR area.
    movePastCSLoadStoreOps(MBB, MBBI, ARM::VLDRD, 0, 3, STI);
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getDPRCalleeSavedAreaSize());

    // Move SP to start of integer callee save spill area 1.
    movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 2, STI);
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea2Size());

    // Move SP to SP upon entry to the function.
    movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 1, STI);
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea1Size());
  }

  if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNdiND ||
      RetOpcode == ARM::TCRETURNri || RetOpcode == ARM::TCRETURNriND) {
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);

    // Jump to label or value in register.
    if (RetOpcode == ARM::TCRETURNdi) {
      BuildMI(MBB, MBBI, dl,
            TII.get(STI.isThumb() ? ARM::TAILJMPdt : ARM::TAILJMPd)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == ARM::TCRETURNdiND) {
      BuildMI(MBB, MBBI, dl,
            TII.get(STI.isThumb() ? ARM::TAILJMPdNDt : ARM::TAILJMPdND)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == ARM::TCRETURNri) {
      BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else if (RetOpcode == ARM::TCRETURNriND) {
      BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPrND)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    // Copy the remaining operands of the TCRETURN pseudo (everything past
    // the jump target, e.g. implicit register uses) onto the new tail jump.
    MachineInstr *NewMI = prior(MBBI);
    for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
      NewMI->addOperand(MBBI->getOperand(i));

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  }

  // Free the vararg register save area allocated by the prologue.
  if (VARegSaveSize)
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, VARegSaveSize);
}
2064
2065#include "ARMGenRegisterInfo.inc"
2066