ARMBaseRegisterInfo.cpp revision 951f699afb0872bec605a3d0e84c41cddcadf7b4
1//===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the base ARM implementation of TargetRegisterInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "ARM.h"
15#include "ARMAddressingModes.h"
16#include "ARMBaseInstrInfo.h"
17#include "ARMBaseRegisterInfo.h"
18#include "ARMInstrInfo.h"
19#include "ARMMachineFunctionInfo.h"
20#include "ARMSubtarget.h"
21#include "llvm/Constants.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/Function.h"
24#include "llvm/LLVMContext.h"
25#include "llvm/CodeGen/MachineConstantPool.h"
26#include "llvm/CodeGen/MachineFrameInfo.h"
27#include "llvm/CodeGen/MachineFunction.h"
28#include "llvm/CodeGen/MachineInstrBuilder.h"
29#include "llvm/CodeGen/MachineLocation.h"
30#include "llvm/CodeGen/MachineRegisterInfo.h"
31#include "llvm/CodeGen/RegisterScavenging.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Support/raw_ostream.h"
35#include "llvm/Target/TargetFrameInfo.h"
36#include "llvm/Target/TargetMachine.h"
37#include "llvm/Target/TargetOptions.h"
38#include "llvm/ADT/BitVector.h"
39#include "llvm/ADT/SmallVector.h"
40#include "llvm/Support/CommandLine.h"
41
namespace llvm {
// Debugging/testing aid: force every eligible stack load/store to be
// addressed through a virtual base register rather than directly off SP/FP.
static cl::opt<bool>
ForceAllBaseRegAlloc("arm-force-base-reg-alloc", cl::Hidden, cl::init(false),
          cl::desc("Force use of virtual base registers for stack load/store"));
// On by default: lay out local stack objects before register allocation so
// frame accesses can share materialized base registers.
static cl::opt<bool>
EnableLocalStackAlloc("enable-local-stack-alloc", cl::init(true), cl::Hidden,
          cl::desc("Enable pre-regalloc stack frame index allocation"));
}
50
51using namespace llvm;
52
// Off by default: when enabled, hasBasePointer() may reserve BasePtr (R6) so
// that frames which are realigned and/or contain variable sized objects can
// still address their locals.
static cl::opt<bool>
EnableBasePointer("arm-use-base-pointer", cl::Hidden, cl::init(false),
          cl::desc("Enable use of a base pointer for complex stack frames"));
56
/// getRegisterNumbering - Map a register enum value (e.g. ARM::LR) to its
/// hardware register number (e.g. 14).  If isSPVFP is non-null, *isSPVFP is
/// set to true when the register is a single-precision VFP register
/// (S0-S31), whose numbering space overlaps the GPR/D/Q numbers.
unsigned ARMBaseRegisterInfo::getRegisterNumbering(unsigned RegEnum,
                                                   bool *isSPVFP) {
  if (isSPVFP)
    *isSPVFP = false;

  using namespace ARM;
  switch (RegEnum) {
  default:
    llvm_unreachable("Unknown ARM register!");
  // GPRs, D registers and Q registers share numbers 0-15; SP/LR/PC are the
  // architectural names for r13/r14/r15.
  case R0:  case D0:  case Q0:  return 0;
  case R1:  case D1:  case Q1:  return 1;
  case R2:  case D2:  case Q2:  return 2;
  case R3:  case D3:  case Q3:  return 3;
  case R4:  case D4:  case Q4:  return 4;
  case R5:  case D5:  case Q5:  return 5;
  case R6:  case D6:  case Q6:  return 6;
  case R7:  case D7:  case Q7:  return 7;
  case R8:  case D8:  case Q8:  return 8;
  case R9:  case D9:  case Q9:  return 9;
  case R10: case D10: case Q10: return 10;
  case R11: case D11: case Q11: return 11;
  case R12: case D12: case Q12: return 12;
  case SP:  case D13: case Q13: return 13;
  case LR:  case D14: case Q14: return 14;
  case PC:  case D15: case Q15: return 15;

  // Upper D registers (D16-D31) have no GPR/Q counterparts.
  case D16: return 16;
  case D17: return 17;
  case D18: return 18;
  case D19: return 19;
  case D20: return 20;
  case D21: return 21;
  case D22: return 22;
  case D23: return 23;
  case D24: return 24;
  case D25: return 25;
  case D26: return 26;
  case D27: return 27;
  case D28: return 28;
  case D29: return 29;
  case D30: return 30;
  case D31: return 31;

  // S registers use their own numbering space; report that via *isSPVFP.
  case S0: case S1: case S2: case S3:
  case S4: case S5: case S6: case S7:
  case S8: case S9: case S10: case S11:
  case S12: case S13: case S14: case S15:
  case S16: case S17: case S18: case S19:
  case S20: case S21: case S22: case S23:
  case S24: case S25: case S26: case S27:
  case S28: case S29: case S30: case S31: {
    if (isSPVFP)
      *isSPVFP = true;
    switch (RegEnum) {
    default: return 0; // Avoid compile time warning.
    case S0: return 0;
    case S1: return 1;
    case S2: return 2;
    case S3: return 3;
    case S4: return 4;
    case S5: return 5;
    case S6: return 6;
    case S7: return 7;
    case S8: return 8;
    case S9: return 9;
    case S10: return 10;
    case S11: return 11;
    case S12: return 12;
    case S13: return 13;
    case S14: return 14;
    case S15: return 15;
    case S16: return 16;
    case S17: return 17;
    case S18: return 18;
    case S19: return 19;
    case S20: return 20;
    case S21: return 21;
    case S22: return 22;
    case S23: return 23;
    case S24: return 24;
    case S25: return 25;
    case S26: return 26;
    case S27: return 27;
    case S28: return 28;
    case S29: return 29;
    case S30: return 30;
    case S31: return 31;
    }
  }
  }
}
148
// Darwin and Thumb targets use R7 as the frame pointer; other ARM targets
// use R11.  R6 is used as the base pointer when hasBasePointer() is true.
ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
                                         const ARMSubtarget &sti)
  : ARMGenRegisterInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    TII(tii), STI(sti),
    FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
    BasePtr(ARM::R6) {
}
156
157const unsigned*
158ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
159  static const unsigned CalleeSavedRegs[] = {
160    ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
161    ARM::R7, ARM::R6,  ARM::R5,  ARM::R4,
162
163    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
164    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
165    0
166  };
167
168  static const unsigned DarwinCalleeSavedRegs[] = {
169    // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved
170    // register.
171    ARM::LR,  ARM::R7,  ARM::R6, ARM::R5, ARM::R4,
172    ARM::R11, ARM::R10, ARM::R8,
173
174    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
175    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
176    0
177  };
178  return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
179}
180
181BitVector ARMBaseRegisterInfo::
182getReservedRegs(const MachineFunction &MF) const {
183  // FIXME: avoid re-calculating this everytime.
184  BitVector Reserved(getNumRegs());
185  Reserved.set(ARM::SP);
186  Reserved.set(ARM::PC);
187  Reserved.set(ARM::FPSCR);
188  if (hasFP(MF))
189    Reserved.set(FramePtr);
190  if (hasBasePointer(MF))
191    Reserved.set(BasePtr);
192  // Some targets reserve R9.
193  if (STI.isR9Reserved())
194    Reserved.set(ARM::R9);
195  return Reserved;
196}
197
198bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
199                                        unsigned Reg) const {
200  switch (Reg) {
201  default: break;
202  case ARM::SP:
203  case ARM::PC:
204    return true;
205  case ARM::R6:
206    if (hasBasePointer(MF))
207      return true;
208    break;
209  case ARM::R7:
210  case ARM::R11:
211    if (FramePtr == Reg && hasFP(MF))
212      return true;
213    break;
214  case ARM::R9:
215    return STI.isR9Reserved();
216  }
217
218  return false;
219}
220
/// getMatchingSuperRegClass - Given super-register class A, sub-register
/// class B, and sub-register index SubIdx, return the largest class whose
/// registers have a SubIdx sub-register in class B, or null to forbid the
/// coalescer from combining across this sub-register relationship.
/// Class sizes are in bytes: 8 = D, 16 = Q, 32 = QQ, 64 = QQQQ.
const TargetRegisterClass *
ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                              const TargetRegisterClass *B,
                                              unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case ARM::ssub_0:
  case ARM::ssub_1:
  case ARM::ssub_2:
  case ARM::ssub_3: {
    // S sub-registers.
    if (A->getSize() == 8) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::DPR_8RegClass;
      assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
      if (A == &ARM::DPR_8RegClass)
        return A;
      // Only the low 16 D registers have S sub-registers (VFP2 subset).
      return &ARM::DPR_VFP2RegClass;
    }

    if (A->getSize() == 16) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::QPR_8RegClass;
      return &ARM::QPR_VFP2RegClass;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::SPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return &ARM::QQPR_VFP2RegClass;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    return 0;  // Do not allow coalescing!
  }
  case ARM::dsub_0:
  case ARM::dsub_1:
  case ARM::dsub_2:
  case ARM::dsub_3: {
    // D sub-registers.
    if (A->getSize() == 16) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B != &ARM::DPRRegClass)
      return 0;  // Do not allow coalescing!
    return A;
  }
  case ARM::dsub_4:
  case ARM::dsub_5:
  case ARM::dsub_6:
  case ARM::dsub_7: {
    // D sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::DPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }

  case ARM::qsub_0:
  case ARM::qsub_1: {
    // Q sub-registers.
    if (A->getSize() == 32) {
      if (B == &ARM::QPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::QPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  case ARM::qsub_2:
  case ARM::qsub_3: {
    // Q sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  }
  // Not reached: every SubIdx case above returns.
  return 0;
}
318
319bool
320ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
321                                          SmallVectorImpl<unsigned> &SubIndices,
322                                          unsigned &NewSubIdx) const {
323
324  unsigned Size = RC->getSize() * 8;
325  if (Size < 6)
326    return 0;
327
328  NewSubIdx = 0;  // Whole register.
329  unsigned NumRegs = SubIndices.size();
330  if (NumRegs == 8) {
331    // 8 D registers -> 1 QQQQ register.
332    return (Size == 512 &&
333            SubIndices[0] == ARM::dsub_0 &&
334            SubIndices[1] == ARM::dsub_1 &&
335            SubIndices[2] == ARM::dsub_2 &&
336            SubIndices[3] == ARM::dsub_3 &&
337            SubIndices[4] == ARM::dsub_4 &&
338            SubIndices[5] == ARM::dsub_5 &&
339            SubIndices[6] == ARM::dsub_6 &&
340            SubIndices[7] == ARM::dsub_7);
341  } else if (NumRegs == 4) {
342    if (SubIndices[0] == ARM::qsub_0) {
343      // 4 Q registers -> 1 QQQQ register.
344      return (Size == 512 &&
345              SubIndices[1] == ARM::qsub_1 &&
346              SubIndices[2] == ARM::qsub_2 &&
347              SubIndices[3] == ARM::qsub_3);
348    } else if (SubIndices[0] == ARM::dsub_0) {
349      // 4 D registers -> 1 QQ register.
350      if (Size >= 256 &&
351          SubIndices[1] == ARM::dsub_1 &&
352          SubIndices[2] == ARM::dsub_2 &&
353          SubIndices[3] == ARM::dsub_3) {
354        if (Size == 512)
355          NewSubIdx = ARM::qqsub_0;
356        return true;
357      }
358    } else if (SubIndices[0] == ARM::dsub_4) {
359      // 4 D registers -> 1 QQ register (2nd).
360      if (Size == 512 &&
361          SubIndices[1] == ARM::dsub_5 &&
362          SubIndices[2] == ARM::dsub_6 &&
363          SubIndices[3] == ARM::dsub_7) {
364        NewSubIdx = ARM::qqsub_1;
365        return true;
366      }
367    } else if (SubIndices[0] == ARM::ssub_0) {
368      // 4 S registers -> 1 Q register.
369      if (Size >= 128 &&
370          SubIndices[1] == ARM::ssub_1 &&
371          SubIndices[2] == ARM::ssub_2 &&
372          SubIndices[3] == ARM::ssub_3) {
373        if (Size >= 256)
374          NewSubIdx = ARM::qsub_0;
375        return true;
376      }
377    }
378  } else if (NumRegs == 2) {
379    if (SubIndices[0] == ARM::qsub_0) {
380      // 2 Q registers -> 1 QQ register.
381      if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
382        if (Size == 512)
383          NewSubIdx = ARM::qqsub_0;
384        return true;
385      }
386    } else if (SubIndices[0] == ARM::qsub_2) {
387      // 2 Q registers -> 1 QQ register (2nd).
388      if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
389        NewSubIdx = ARM::qqsub_1;
390        return true;
391      }
392    } else if (SubIndices[0] == ARM::dsub_0) {
393      // 2 D registers -> 1 Q register.
394      if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
395        if (Size >= 256)
396          NewSubIdx = ARM::qsub_0;
397        return true;
398      }
399    } else if (SubIndices[0] == ARM::dsub_2) {
400      // 2 D registers -> 1 Q register (2nd).
401      if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
402        NewSubIdx = ARM::qsub_1;
403        return true;
404      }
405    } else if (SubIndices[0] == ARM::dsub_4) {
406      // 2 D registers -> 1 Q register (3rd).
407      if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
408        NewSubIdx = ARM::qsub_2;
409        return true;
410      }
411    } else if (SubIndices[0] == ARM::dsub_6) {
412      // 2 D registers -> 1 Q register (3rd).
413      if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
414        NewSubIdx = ARM::qsub_3;
415        return true;
416      }
417    } else if (SubIndices[0] == ARM::ssub_0) {
418      // 2 S registers -> 1 D register.
419      if (SubIndices[1] == ARM::ssub_1) {
420        if (Size >= 128)
421          NewSubIdx = ARM::dsub_0;
422        return true;
423      }
424    } else if (SubIndices[0] == ARM::ssub_2) {
425      // 2 S registers -> 1 D register (2nd).
426      if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
427        NewSubIdx = ARM::dsub_1;
428        return true;
429      }
430    }
431  }
432  return false;
433}
434
435
/// getPointerRegClass - Pointer values are always modeled in the core GPR
/// register class; Kind is ignored.
const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
  return ARM::GPRRegisterClass;
}
440
441/// getAllocationOrder - Returns the register allocation order for a specified
442/// register class in the form of a pair of TargetRegisterClass iterators.
443std::pair<TargetRegisterClass::iterator,TargetRegisterClass::iterator>
444ARMBaseRegisterInfo::getAllocationOrder(const TargetRegisterClass *RC,
445                                        unsigned HintType, unsigned HintReg,
446                                        const MachineFunction &MF) const {
447  // Alternative register allocation orders when favoring even / odd registers
448  // of register pairs.
449
450  // No FP, R9 is available.
451  static const unsigned GPREven1[] = {
452    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
453    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
454    ARM::R9, ARM::R11
455  };
456  static const unsigned GPROdd1[] = {
457    ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
458    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
459    ARM::R8, ARM::R10
460  };
461
462  // FP is R7, R9 is available.
463  static const unsigned GPREven2[] = {
464    ARM::R0, ARM::R2, ARM::R4,          ARM::R8, ARM::R10,
465    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
466    ARM::R9, ARM::R11
467  };
468  static const unsigned GPROdd2[] = {
469    ARM::R1, ARM::R3, ARM::R5,          ARM::R9, ARM::R11,
470    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
471    ARM::R8, ARM::R10
472  };
473
474  // FP is R11, R9 is available.
475  static const unsigned GPREven3[] = {
476    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
477    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
478    ARM::R9
479  };
480  static const unsigned GPROdd3[] = {
481    ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
482    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
483    ARM::R8
484  };
485
486  // No FP, R9 is not available.
487  static const unsigned GPREven4[] = {
488    ARM::R0, ARM::R2, ARM::R4, ARM::R6,          ARM::R10,
489    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
490    ARM::R11
491  };
492  static const unsigned GPROdd4[] = {
493    ARM::R1, ARM::R3, ARM::R5, ARM::R7,          ARM::R11,
494    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
495    ARM::R10
496  };
497
498  // FP is R7, R9 is not available.
499  static const unsigned GPREven5[] = {
500    ARM::R0, ARM::R2, ARM::R4,                   ARM::R10,
501    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
502    ARM::R11
503  };
504  static const unsigned GPROdd5[] = {
505    ARM::R1, ARM::R3, ARM::R5,                   ARM::R11,
506    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
507    ARM::R10
508  };
509
510  // FP is R11, R9 is not available.
511  static const unsigned GPREven6[] = {
512    ARM::R0, ARM::R2, ARM::R4, ARM::R6,
513    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
514  };
515  static const unsigned GPROdd6[] = {
516    ARM::R1, ARM::R3, ARM::R5, ARM::R7,
517    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
518  };
519
520
521  if (HintType == ARMRI::RegPairEven) {
522    if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
523      // It's no longer possible to fulfill this hint. Return the default
524      // allocation order.
525      return std::make_pair(RC->allocation_order_begin(MF),
526                            RC->allocation_order_end(MF));
527
528    if (!hasFP(MF)) {
529      if (!STI.isR9Reserved())
530        return std::make_pair(GPREven1,
531                              GPREven1 + (sizeof(GPREven1)/sizeof(unsigned)));
532      else
533        return std::make_pair(GPREven4,
534                              GPREven4 + (sizeof(GPREven4)/sizeof(unsigned)));
535    } else if (FramePtr == ARM::R7) {
536      if (!STI.isR9Reserved())
537        return std::make_pair(GPREven2,
538                              GPREven2 + (sizeof(GPREven2)/sizeof(unsigned)));
539      else
540        return std::make_pair(GPREven5,
541                              GPREven5 + (sizeof(GPREven5)/sizeof(unsigned)));
542    } else { // FramePtr == ARM::R11
543      if (!STI.isR9Reserved())
544        return std::make_pair(GPREven3,
545                              GPREven3 + (sizeof(GPREven3)/sizeof(unsigned)));
546      else
547        return std::make_pair(GPREven6,
548                              GPREven6 + (sizeof(GPREven6)/sizeof(unsigned)));
549    }
550  } else if (HintType == ARMRI::RegPairOdd) {
551    if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
552      // It's no longer possible to fulfill this hint. Return the default
553      // allocation order.
554      return std::make_pair(RC->allocation_order_begin(MF),
555                            RC->allocation_order_end(MF));
556
557    if (!hasFP(MF)) {
558      if (!STI.isR9Reserved())
559        return std::make_pair(GPROdd1,
560                              GPROdd1 + (sizeof(GPROdd1)/sizeof(unsigned)));
561      else
562        return std::make_pair(GPROdd4,
563                              GPROdd4 + (sizeof(GPROdd4)/sizeof(unsigned)));
564    } else if (FramePtr == ARM::R7) {
565      if (!STI.isR9Reserved())
566        return std::make_pair(GPROdd2,
567                              GPROdd2 + (sizeof(GPROdd2)/sizeof(unsigned)));
568      else
569        return std::make_pair(GPROdd5,
570                              GPROdd5 + (sizeof(GPROdd5)/sizeof(unsigned)));
571    } else { // FramePtr == ARM::R11
572      if (!STI.isR9Reserved())
573        return std::make_pair(GPROdd3,
574                              GPROdd3 + (sizeof(GPROdd3)/sizeof(unsigned)));
575      else
576        return std::make_pair(GPROdd6,
577                              GPROdd6 + (sizeof(GPROdd6)/sizeof(unsigned)));
578    }
579  }
580  return std::make_pair(RC->allocation_order_begin(MF),
581                        RC->allocation_order_end(MF));
582}
583
584/// ResolveRegAllocHint - Resolves the specified register allocation hint
585/// to a physical register. Returns the physical register if it is successful.
586unsigned
587ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
588                                         const MachineFunction &MF) const {
589  if (Reg == 0 || !isPhysicalRegister(Reg))
590    return 0;
591  if (Type == 0)
592    return Reg;
593  else if (Type == (unsigned)ARMRI::RegPairOdd)
594    // Odd register.
595    return getRegisterPairOdd(Reg, MF);
596  else if (Type == (unsigned)ARMRI::RegPairEven)
597    // Even register.
598    return getRegisterPairEven(Reg, MF);
599  return 0;
600}
601
602void
603ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
604                                        MachineFunction &MF) const {
605  MachineRegisterInfo *MRI = &MF.getRegInfo();
606  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
607  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
608       Hint.first == (unsigned)ARMRI::RegPairEven) &&
609      Hint.second && TargetRegisterInfo::isVirtualRegister(Hint.second)) {
610    // If 'Reg' is one of the even / odd register pair and it's now changed
611    // (e.g. coalesced) into a different register. The other register of the
612    // pair allocation hint must be updated to reflect the relationship
613    // change.
614    unsigned OtherReg = Hint.second;
615    Hint = MRI->getRegAllocationHint(OtherReg);
616    if (Hint.second == Reg)
617      // Make sure the pair has not already divorced.
618      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
619  }
620}
621
622/// hasFP - Return true if the specified function should have a dedicated frame
623/// pointer register.  This is true if the function has variable sized allocas
624/// or if frame pointer elimination is disabled.
625///
626bool ARMBaseRegisterInfo::hasFP(const MachineFunction &MF) const {
627  // Mac OS X requires FP not to be clobbered for backtracing purpose.
628  if (STI.isTargetDarwin())
629    return true;
630
631  const MachineFrameInfo *MFI = MF.getFrameInfo();
632  // Always eliminate non-leaf frame pointers.
633  return ((DisableFramePointerElim(MF) && MFI->hasCalls()) ||
634          needsStackRealignment(MF) ||
635          MFI->hasVarSizedObjects() ||
636          MFI->isFrameAddressTaken());
637}
638
639bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
640  const MachineFrameInfo *MFI = MF.getFrameInfo();
641  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
642
643  if (!EnableBasePointer)
644    return false;
645
646  if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
647    return true;
648
649  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
650  // negative range for ldr/str (255), and thumb1 is positive offsets only.
651  // It's going to be better to use the SP or Base Pointer instead. When there
652  // are variable sized objects, we can't reference off of the SP, so we
653  // reserve a Base Pointer.
654  if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
655    // Conservatively estimate whether the negative offset from the frame
656    // pointer will be sufficient to reach. If a function has a smallish
657    // frame, it's less likely to have lots of spills and callee saved
658    // space, so it's all more likely to be within range of the frame pointer.
659    // If it's wrong, the scavenger will still enable access to work, it just
660    // won't be optimal.
661    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
662      return false;
663    return true;
664  }
665
666  return false;
667}
668
669bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
670  const MachineFrameInfo *MFI = MF.getFrameInfo();
671  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
672  // We can't realign the stack if:
673  // 1. Dynamic stack realignment is explicitly disabled,
674  // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
675  // 3. There are VLAs in the function and the base pointer is disabled.
676  return (RealignStack && !AFI->isThumb1OnlyFunction() &&
677          (!MFI->hasVarSizedObjects() || EnableBasePointer));
678}
679
680bool ARMBaseRegisterInfo::
681needsStackRealignment(const MachineFunction &MF) const {
682  const MachineFrameInfo *MFI = MF.getFrameInfo();
683  const Function *F = MF.getFunction();
684  unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
685  bool requiresRealignment = ((MFI->getLocalFrameMaxAlign() > StackAlign) ||
686                               F->hasFnAttr(Attribute::StackAlignment));
687
688  return requiresRealignment && canRealignStack(MF);
689}
690
691bool ARMBaseRegisterInfo::
692cannotEliminateFrame(const MachineFunction &MF) const {
693  const MachineFrameInfo *MFI = MF.getFrameInfo();
694  if (DisableFramePointerElim(MF) && MFI->adjustsStack())
695    return true;
696  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
697    || needsStackRealignment(MF);
698}
699
700/// estimateStackSize - Estimate and return the size of the frame.
701static unsigned estimateStackSize(MachineFunction &MF) {
702  const MachineFrameInfo *FFI = MF.getFrameInfo();
703  int Offset = 0;
704  for (int i = FFI->getObjectIndexBegin(); i != 0; ++i) {
705    int FixedOff = -FFI->getObjectOffset(i);
706    if (FixedOff > Offset) Offset = FixedOff;
707  }
708  for (unsigned i = 0, e = FFI->getObjectIndexEnd(); i != e; ++i) {
709    if (FFI->isDeadObjectIndex(i))
710      continue;
711    Offset += FFI->getObjectSize(i);
712    unsigned Align = FFI->getObjectAlignment(i);
713    // Adjust to alignment boundary
714    Offset = (Offset+Align-1)/Align*Align;
715  }
716  return (unsigned)Offset;
717}
718
/// estimateRSStackSizeLimit - Look at each instruction that references stack
/// frames and return the stack size limit beyond which some of these
/// instructions will require a scratch register during their expansion later.
unsigned
ARMBaseRegisterInfo::estimateRSStackSizeLimit(MachineFunction &MF) const {
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // Start from the largest immediate any addressing mode here supports
  // (12 bits) and shrink as more restrictive instructions are found.
  unsigned Limit = (1 << 12) - 1;
  for (MachineFunction::iterator BB = MF.begin(),E = MF.end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (!I->getOperand(i).isFI()) continue;

        // When using ADDri to get the address of a stack object, 255 is the
        // largest offset guaranteed to fit in the immediate offset.
        if (I->getOpcode() == ARM::ADDri) {
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        }

        // Otherwise check the addressing mode.
        switch (I->getDesc().TSFlags & ARMII::AddrModeMask) {
        case ARMII::AddrMode3:
        case ARMII::AddrModeT2_i8:
          // 8-bit immediate offset.
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode5:
        case ARMII::AddrModeT2_i8s4:
          // 8-bit immediate scaled by 4.
          Limit = std::min(Limit, ((1U << 8) - 1) * 4);
          break;
        case ARMII::AddrModeT2_i12:
          // i12 supports only positive offset so these will be converted to
          // i8 opcodes. See llvm::rewriteT2FrameIndex.
          if (hasFP(MF) && AFI->hasStackFrame())
            Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode6:
          // Addressing mode 6 (load/store) instructions can't encode an
          // immediate offset for stack references.
          return 0;
        default:
          break;
        }
        break; // At most one FI per instruction
      }
    }
  }

  return Limit;
}
769
770static unsigned GetFunctionSizeInBytes(const MachineFunction &MF,
771                                       const ARMBaseInstrInfo &TII) {
772  unsigned FnSize = 0;
773  for (MachineFunction::const_iterator MBBI = MF.begin(), E = MF.end();
774       MBBI != E; ++MBBI) {
775    const MachineBasicBlock &MBB = *MBBI;
776    for (MachineBasicBlock::const_iterator I = MBB.begin(),E = MBB.end();
777         I != E; ++I)
778      FnSize += TII.GetInstSizeInBytes(I);
779  }
780  return FnSize;
781}
782
783void
784ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
785                                                       RegScavenger *RS) const {
786  // This tells PEI to spill the FP as if it is any other callee-save register
787  // to take advantage the eliminateFrameIndex machinery. This also ensures it
788  // is spilled in the order specified by getCalleeSavedRegs() to make it easier
789  // to combine multiple loads / stores.
790  bool CanEliminateFrame = true;
791  bool CS1Spilled = false;
792  bool LRSpilled = false;
793  unsigned NumGPRSpills = 0;
794  SmallVector<unsigned, 4> UnspilledCS1GPRs;
795  SmallVector<unsigned, 4> UnspilledCS2GPRs;
796  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
797  MachineFrameInfo *MFI = MF.getFrameInfo();
798
799  // Spill R4 if Thumb2 function requires stack realignment - it will be used as
800  // scratch register.
801  // FIXME: It will be better just to find spare register here.
802  if (needsStackRealignment(MF) &&
803      AFI->isThumb2Function())
804    MF.getRegInfo().setPhysRegUsed(ARM::R4);
805
806  // Spill LR if Thumb1 function uses variable length argument lists.
807  if (AFI->isThumb1OnlyFunction() && AFI->getVarArgsRegSaveSize() > 0)
808    MF.getRegInfo().setPhysRegUsed(ARM::LR);
809
810  // Spill the BasePtr if it's used.
811  if (hasBasePointer(MF))
812    MF.getRegInfo().setPhysRegUsed(BasePtr);
813
814  // Don't spill FP if the frame can be eliminated. This is determined
815  // by scanning the callee-save registers to see if any is used.
816  const unsigned *CSRegs = getCalleeSavedRegs();
817  for (unsigned i = 0; CSRegs[i]; ++i) {
818    unsigned Reg = CSRegs[i];
819    bool Spilled = false;
820    if (MF.getRegInfo().isPhysRegUsed(Reg)) {
821      AFI->setCSRegisterIsSpilled(Reg);
822      Spilled = true;
823      CanEliminateFrame = false;
824    } else {
825      // Check alias registers too.
826      for (const unsigned *Aliases = getAliasSet(Reg); *Aliases; ++Aliases) {
827        if (MF.getRegInfo().isPhysRegUsed(*Aliases)) {
828          Spilled = true;
829          CanEliminateFrame = false;
830        }
831      }
832    }
833
834    if (!ARM::GPRRegisterClass->contains(Reg))
835      continue;
836
837    if (Spilled) {
838      NumGPRSpills++;
839
840      if (!STI.isTargetDarwin()) {
841        if (Reg == ARM::LR)
842          LRSpilled = true;
843        CS1Spilled = true;
844        continue;
845      }
846
847      // Keep track if LR and any of R4, R5, R6, and R7 is spilled.
848      switch (Reg) {
849      case ARM::LR:
850        LRSpilled = true;
851        // Fallthrough
852      case ARM::R4:
853      case ARM::R5:
854      case ARM::R6:
855      case ARM::R7:
856        CS1Spilled = true;
857        break;
858      default:
859        break;
860      }
861    } else {
862      if (!STI.isTargetDarwin()) {
863        UnspilledCS1GPRs.push_back(Reg);
864        continue;
865      }
866
867      switch (Reg) {
868      case ARM::R4:
869      case ARM::R5:
870      case ARM::R6:
871      case ARM::R7:
872      case ARM::LR:
873        UnspilledCS1GPRs.push_back(Reg);
874        break;
875      default:
876        UnspilledCS2GPRs.push_back(Reg);
877        break;
878      }
879    }
880  }
881
882  bool ForceLRSpill = false;
883  if (!LRSpilled && AFI->isThumb1OnlyFunction()) {
884    unsigned FnSize = GetFunctionSizeInBytes(MF, TII);
885    // Force LR to be spilled if the Thumb function size is > 2048. This enables
886    // use of BL to implement far jump. If it turns out that it's not needed
887    // then the branch fix up path will undo it.
888    if (FnSize >= (1 << 11)) {
889      CanEliminateFrame = false;
890      ForceLRSpill = true;
891    }
892  }
893
894  // If any of the stack slot references may be out of range of an immediate
895  // offset, make sure a register (or a spill slot) is available for the
896  // register scavenger. Note that if we're indexing off the frame pointer, the
897  // effective stack size is 4 bytes larger since the FP points to the stack
898  // slot of the previous FP. Also, if we have variable sized objects in the
899  // function, stack slot references will often be negative, and some of
900  // our instructions are positive-offset only, so conservatively consider
901  // that case to want a spill slot (or register) as well. Similarly, if
902  // the function adjusts the stack pointer during execution and the
903  // adjustments aren't already part of our stack size estimate, our offset
904  // calculations may be off, so be conservative.
905  // FIXME: We could add logic to be more precise about negative offsets
906  //        and which instructions will need a scratch register for them. Is it
907  //        worth the effort and added fragility?
908  bool BigStack =
909    (RS &&
910     (estimateStackSize(MF) + ((hasFP(MF) && AFI->hasStackFrame()) ? 4:0) >=
911      estimateRSStackSizeLimit(MF)))
912    || MFI->hasVarSizedObjects()
913    || (MFI->adjustsStack() && !canSimplifyCallFramePseudos(MF));
914
915  bool ExtraCSSpill = false;
916  if (BigStack || !CanEliminateFrame || cannotEliminateFrame(MF)) {
917    AFI->setHasStackFrame(true);
918
919    // If LR is not spilled, but at least one of R4, R5, R6, and R7 is spilled.
920    // Spill LR as well so we can fold BX_RET to the registers restore (LDM).
921    if (!LRSpilled && CS1Spilled) {
922      MF.getRegInfo().setPhysRegUsed(ARM::LR);
923      AFI->setCSRegisterIsSpilled(ARM::LR);
924      NumGPRSpills++;
925      UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(),
926                                    UnspilledCS1GPRs.end(), (unsigned)ARM::LR));
927      ForceLRSpill = false;
928      ExtraCSSpill = true;
929    }
930
931    if (hasFP(MF)) {
932      MF.getRegInfo().setPhysRegUsed(FramePtr);
933      NumGPRSpills++;
934    }
935
936    // If stack and double are 8-byte aligned and we are spilling an odd number
937    // of GPRs. Spill one extra callee save GPR so we won't have to pad between
938    // the integer and double callee save areas.
939    unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
940    if (TargetAlign == 8 && (NumGPRSpills & 1)) {
941      if (CS1Spilled && !UnspilledCS1GPRs.empty()) {
942        for (unsigned i = 0, e = UnspilledCS1GPRs.size(); i != e; ++i) {
943          unsigned Reg = UnspilledCS1GPRs[i];
944          // Don't spill high register if the function is thumb1
945          if (!AFI->isThumb1OnlyFunction() ||
946              isARMLowRegister(Reg) || Reg == ARM::LR) {
947            MF.getRegInfo().setPhysRegUsed(Reg);
948            AFI->setCSRegisterIsSpilled(Reg);
949            if (!isReservedReg(MF, Reg))
950              ExtraCSSpill = true;
951            break;
952          }
953        }
954      } else if (!UnspilledCS2GPRs.empty() &&
955                 !AFI->isThumb1OnlyFunction()) {
956        unsigned Reg = UnspilledCS2GPRs.front();
957        MF.getRegInfo().setPhysRegUsed(Reg);
958        AFI->setCSRegisterIsSpilled(Reg);
959        if (!isReservedReg(MF, Reg))
960          ExtraCSSpill = true;
961      }
962    }
963
964    // Estimate if we might need to scavenge a register at some point in order
965    // to materialize a stack offset. If so, either spill one additional
966    // callee-saved register or reserve a special spill slot to facilitate
967    // register scavenging. Thumb1 needs a spill slot for stack pointer
968    // adjustments also, even when the frame itself is small.
969    if (BigStack && !ExtraCSSpill) {
970      // If any non-reserved CS register isn't spilled, just spill one or two
971      // extra. That should take care of it!
972      unsigned NumExtras = TargetAlign / 4;
973      SmallVector<unsigned, 2> Extras;
974      while (NumExtras && !UnspilledCS1GPRs.empty()) {
975        unsigned Reg = UnspilledCS1GPRs.back();
976        UnspilledCS1GPRs.pop_back();
977        if (!isReservedReg(MF, Reg) &&
978            (!AFI->isThumb1OnlyFunction() || isARMLowRegister(Reg) ||
979             Reg == ARM::LR)) {
980          Extras.push_back(Reg);
981          NumExtras--;
982        }
983      }
984      // For non-Thumb1 functions, also check for hi-reg CS registers
985      if (!AFI->isThumb1OnlyFunction()) {
986        while (NumExtras && !UnspilledCS2GPRs.empty()) {
987          unsigned Reg = UnspilledCS2GPRs.back();
988          UnspilledCS2GPRs.pop_back();
989          if (!isReservedReg(MF, Reg)) {
990            Extras.push_back(Reg);
991            NumExtras--;
992          }
993        }
994      }
995      if (Extras.size() && NumExtras == 0) {
996        for (unsigned i = 0, e = Extras.size(); i != e; ++i) {
997          MF.getRegInfo().setPhysRegUsed(Extras[i]);
998          AFI->setCSRegisterIsSpilled(Extras[i]);
999        }
1000      } else if (!AFI->isThumb1OnlyFunction()) {
1001        // note: Thumb1 functions spill to R12, not the stack.  Reserve a slot
1002        // closest to SP or frame pointer.
1003        const TargetRegisterClass *RC = ARM::GPRRegisterClass;
1004        RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
1005                                                           RC->getAlignment(),
1006                                                           false));
1007      }
1008    }
1009  }
1010
1011  if (ForceLRSpill) {
1012    MF.getRegInfo().setPhysRegUsed(ARM::LR);
1013    AFI->setCSRegisterIsSpilled(ARM::LR);
1014    AFI->setLRIsSpilledForFarJump(true);
1015  }
1016}
1017
/// getRARegister - Return the register holding the return address.
/// On ARM this is the link register, LR.
unsigned ARMBaseRegisterInfo::getRARegister() const {
  return ARM::LR;
}
1021
1022unsigned
1023ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
1024  if (hasFP(MF))
1025    return FramePtr;
1026  return ARM::SP;
1027}
1028
// Provide a base+offset reference to an FI slot for debug info. It's the
// same as what we use for resolving the code-gen references for now.
// FIXME: This can go wrong when references are SP-relative and simple call
//        frames aren't used.
int
ARMBaseRegisterInfo::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  // Delegate to the full resolver with no additional SP adjustment.
  return ResolveFrameIndexReference(MF, FI, FrameReg, 0);
}
1038
/// ResolveFrameIndexReference - Compute the offset for frame index FI and
/// select the base register (returned via FrameReg) it should be addressed
/// from: SP, FP, or the base pointer. SPAdj is the additional SP adjustment
/// in effect at the reference point (e.g. inside a call sequence).
int
ARMBaseRegisterInfo::ResolveFrameIndexReference(const MachineFunction &MF,
                                                int FI,
                                                unsigned &FrameReg,
                                                int SPAdj) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // SP-relative offset of the slot, and the equivalent FP-relative offset
  // (FP points at its own spill slot, getFramePtrSpillOffset() into the
  // frame).
  int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
  int FPOffset = Offset - AFI->getFramePtrSpillOffset();
  bool isFixed = MFI->isFixedObjectIndex(FI);

  // Default to SP-relative addressing.
  FrameReg = ARM::SP;
  Offset += SPAdj;
  // Callee-saved spill area slots are addressed relative to the start of
  // their respective area.
  if (AFI->isGPRCalleeSavedArea1Frame(FI))
    return Offset - AFI->getGPRCalleeSavedArea1Offset();
  else if (AFI->isGPRCalleeSavedArea2Frame(FI))
    return Offset - AFI->getGPRCalleeSavedArea2Offset();
  else if (AFI->isDPRCalleeSavedAreaFrame(FI))
    return Offset - AFI->getDPRCalleeSavedAreaOffset();

  // When dynamically realigning the stack, use the frame pointer for
  // parameters, and the stack/base pointer for locals.
  if (needsStackRealignment(MF)) {
    assert (hasFP(MF) && "dynamic stack realignment without a FP!");
    if (isFixed) {
      FrameReg = getFrameRegister(MF);
      Offset = FPOffset;
    } else if (MFI->hasVarSizedObjects()) {
      assert(hasBasePointer(MF) &&
             "VLAs and dynamic stack alignment, but missing base pointer!");
      FrameReg = BasePtr;
    }
    return Offset;
  }

  // If there is a frame pointer, use it when we can.
  if (hasFP(MF) && AFI->hasStackFrame()) {
    // Use frame pointer to reference fixed objects. Use it for locals if
    // there are VLAs (and thus the SP isn't reliable as a base).
    if (isFixed || (MFI->hasVarSizedObjects() && !hasBasePointer(MF))) {
      FrameReg = getFrameRegister(MF);
      return FPOffset;
    } else if (MFI->hasVarSizedObjects()) {
      assert(hasBasePointer(MF) && "missing base pointer!");
      // Use the base register since we have it.
      FrameReg = BasePtr;
    } else if (AFI->isThumb2Function()) {
      // In Thumb2 mode, the negative offset is very limited. Try to avoid
      // out of range references.
      if (FPOffset >= -255 && FPOffset < 0) {
        FrameReg = getFrameRegister(MF);
        return FPOffset;
      }
    } else if (Offset > (FPOffset < 0 ? -FPOffset : FPOffset)) {
      // Otherwise, use SP or FP, whichever is closer to the stack slot.
      FrameReg = getFrameRegister(MF);
      return FPOffset;
    }
  }
  // Use the base pointer if we have one.
  if (hasBasePointer(MF))
    FrameReg = BasePtr;
  return Offset;
}
1103
1104int
1105ARMBaseRegisterInfo::getFrameIndexOffset(const MachineFunction &MF,
1106                                         int FI) const {
1107  unsigned FrameReg;
1108  return getFrameIndexReference(MF, FI, FrameReg);
1109}
1110
/// getEHExceptionRegister - Not implemented for ARM; reaching this is a
/// bug in the caller.
unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}
1115
/// getEHHandlerRegister - Not implemented for ARM; reaching this is a
/// bug in the caller.
unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}
1120
/// getDwarfRegNum - Map a target register to its DWARF number. The isEH
/// flag is ignored; flavour 0 of the tablegen'erated mapping is always used.
int ARMBaseRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
  return ARMGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
}
1124
/// getRegisterPairEven - Return the even-numbered register that pairs with
/// Reg (Reg being the odd member of the pair), or 0 when the pair would
/// include a special or reserved register and so must not be formed.
unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
                                              const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R1:
    return ARM::R0;
  case ARM::R3:
    return ARM::R2;
  case ARM::R5:
    return ARM::R4;
  case ARM::R7:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R6;
  case ARM::R9:
    return isReservedReg(MF, ARM::R9)  ? 0 :ARM::R8;
  case ARM::R11:
    return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;

  // Single-precision VFP register pairs.
  case ARM::S1:
    return ARM::S0;
  case ARM::S3:
    return ARM::S2;
  case ARM::S5:
    return ARM::S4;
  case ARM::S7:
    return ARM::S6;
  case ARM::S9:
    return ARM::S8;
  case ARM::S11:
    return ARM::S10;
  case ARM::S13:
    return ARM::S12;
  case ARM::S15:
    return ARM::S14;
  case ARM::S17:
    return ARM::S16;
  case ARM::S19:
    return ARM::S18;
  case ARM::S21:
    return ARM::S20;
  case ARM::S23:
    return ARM::S22;
  case ARM::S25:
    return ARM::S24;
  case ARM::S27:
    return ARM::S26;
  case ARM::S29:
    return ARM::S28;
  case ARM::S31:
    return ARM::S30;

  // Double-precision VFP register pairs.
  case ARM::D1:
    return ARM::D0;
  case ARM::D3:
    return ARM::D2;
  case ARM::D5:
    return ARM::D4;
  case ARM::D7:
    return ARM::D6;
  case ARM::D9:
    return ARM::D8;
  case ARM::D11:
    return ARM::D10;
  case ARM::D13:
    return ARM::D12;
  case ARM::D15:
    return ARM::D14;
  case ARM::D17:
    return ARM::D16;
  case ARM::D19:
    return ARM::D18;
  case ARM::D21:
    return ARM::D20;
  case ARM::D23:
    return ARM::D22;
  case ARM::D25:
    return ARM::D24;
  case ARM::D27:
    return ARM::D26;
  case ARM::D29:
    return ARM::D28;
  case ARM::D31:
    return ARM::D30;
  }

  // Reg is not a member of a pairable register pair.
  return 0;
}
1214
/// getRegisterPairOdd - Return the odd-numbered register that pairs with
/// Reg (Reg being the even member of the pair), or 0 when the pair would
/// include a special or reserved register and so must not be formed.
unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
                                             const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R0:
    return ARM::R1;
  case ARM::R2:
    return ARM::R3;
  case ARM::R4:
    return ARM::R5;
  case ARM::R6:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R7;
  case ARM::R8:
    return isReservedReg(MF, ARM::R9)  ? 0 :ARM::R9;
  case ARM::R10:
    return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;

  // Single-precision VFP register pairs.
  case ARM::S0:
    return ARM::S1;
  case ARM::S2:
    return ARM::S3;
  case ARM::S4:
    return ARM::S5;
  case ARM::S6:
    return ARM::S7;
  case ARM::S8:
    return ARM::S9;
  case ARM::S10:
    return ARM::S11;
  case ARM::S12:
    return ARM::S13;
  case ARM::S14:
    return ARM::S15;
  case ARM::S16:
    return ARM::S17;
  case ARM::S18:
    return ARM::S19;
  case ARM::S20:
    return ARM::S21;
  case ARM::S22:
    return ARM::S23;
  case ARM::S24:
    return ARM::S25;
  case ARM::S26:
    return ARM::S27;
  case ARM::S28:
    return ARM::S29;
  case ARM::S30:
    return ARM::S31;

  // Double-precision VFP register pairs.
  case ARM::D0:
    return ARM::D1;
  case ARM::D2:
    return ARM::D3;
  case ARM::D4:
    return ARM::D5;
  case ARM::D6:
    return ARM::D7;
  case ARM::D8:
    return ARM::D9;
  case ARM::D10:
    return ARM::D11;
  case ARM::D12:
    return ARM::D13;
  case ARM::D14:
    return ARM::D15;
  case ARM::D16:
    return ARM::D17;
  case ARM::D18:
    return ARM::D19;
  case ARM::D20:
    return ARM::D21;
  case ARM::D22:
    return ARM::D23;
  case ARM::D24:
    return ARM::D25;
  case ARM::D26:
    return ARM::D27;
  case ARM::D28:
    return ARM::D29;
  case ARM::D30:
    return ARM::D31;
  }

  // Reg is not a member of a pairable register pair.
  return 0;
}
1304
1305/// emitLoadConstPool - Emits a load from constpool to materialize the
1306/// specified immediate.
1307void ARMBaseRegisterInfo::
1308emitLoadConstPool(MachineBasicBlock &MBB,
1309                  MachineBasicBlock::iterator &MBBI,
1310                  DebugLoc dl,
1311                  unsigned DestReg, unsigned SubIdx, int Val,
1312                  ARMCC::CondCodes Pred,
1313                  unsigned PredReg) const {
1314  MachineFunction &MF = *MBB.getParent();
1315  MachineConstantPool *ConstantPool = MF.getConstantPool();
1316  const Constant *C =
1317        ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
1318  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
1319
1320  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
1321    .addReg(DestReg, getDefRegState(true), SubIdx)
1322    .addConstantPoolIndex(Idx)
1323    .addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
1324}
1325
/// requiresRegisterScavenging - ARM unconditionally requires a register
/// scavenger for every function.
bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}
1330
/// requiresFrameIndexScavenging - ARM unconditionally performs scavenging
/// during frame index elimination.
bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}
1335
/// requiresVirtualBaseRegisters - Whether pre-regalloc local stack frame
/// allocation may create virtual base registers. Controlled by the
/// -enable-local-stack-alloc flag (defaults to true).
bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return EnableLocalStackAlloc;
}
1340
1341// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
1342// not required, we reserve argument space for call sites in the function
1343// immediately on entry to the current function. This eliminates the need for
1344// add/sub sp brackets around call sites. Returns true if the call frame is
1345// included as part of the stack frame.
1346bool ARMBaseRegisterInfo::
1347hasReservedCallFrame(const MachineFunction &MF) const {
1348  const MachineFrameInfo *FFI = MF.getFrameInfo();
1349  unsigned CFSize = FFI->getMaxCallFrameSize();
1350  // It's not always a good idea to include the call frame as part of the
1351  // stack frame. ARM (especially Thumb) has small immediate offset to
1352  // address the stack frame. So a large call frame can cause poor codegen
1353  // and may even makes it impossible to scavenge a register.
1354  if (CFSize >= ((1 << 12) - 1) / 2)  // Half of imm12
1355    return false;
1356
1357  return !MF.getFrameInfo()->hasVarSizedObjects();
1358}
1359
1360// canSimplifyCallFramePseudos - If there is a reserved call frame, the
1361// call frame pseudos can be simplified. Unlike most targets, having a FP
1362// is not sufficient here since we still may reference some objects via SP
1363// even when FP is available in Thumb2 mode.
1364bool ARMBaseRegisterInfo::
1365canSimplifyCallFramePseudos(const MachineFunction &MF) const {
1366  return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
1367}
1368
1369static void
1370emitSPUpdate(bool isARM,
1371             MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
1372             DebugLoc dl, const ARMBaseInstrInfo &TII,
1373             int NumBytes,
1374             ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
1375  if (isARM)
1376    emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
1377                            Pred, PredReg, TII);
1378  else
1379    emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
1380                           Pred, PredReg, TII);
1381}
1382
1383
/// eliminateCallFramePseudoInstr - Replace ADJCALLSTACKDOWN/UP pseudos with
/// real SP adjustments when the call frame is not reserved; otherwise just
/// delete them.
void ARMBaseRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If we have alloca, convert as follows:
    // ADJCALLSTACKDOWN -> sub, sp, sp, amount
    // ADJCALLSTACKUP   -> add, sp, sp, amount
    MachineInstr *Old = I;
    DebugLoc dl = Old->getDebugLoc();
    unsigned Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly.  To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;

      ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
      assert(!AFI->isThumb1OnlyFunction() &&
             "This eliminateCallFramePseudoInstr does not support Thumb1!");
      bool isARM = !AFI->isThumbFunction();

      // Replace the pseudo instruction with a new instruction...
      // Carry the predicate of the pseudo over to the SP update.
      unsigned Opc = Old->getOpcode();
      int PIdx = Old->findFirstPredOperandIdx();
      ARMCC::CondCodes Pred = (PIdx == -1)
        ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
      if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
        // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
        unsigned PredReg = Old->getOperand(2).getReg();
        emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
      } else {
        // Note: PredReg is operand 3 for ADJCALLSTACKUP.
        unsigned PredReg = Old->getOperand(3).getReg();
        assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
        emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
      }
    }
  }
  // The pseudo itself is always removed.
  MBB.erase(I);
}
1425
1426int64_t ARMBaseRegisterInfo::
1427getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
1428  const TargetInstrDesc &Desc = MI->getDesc();
1429  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
1430  int64_t InstrOffs = 0;;
1431  int Scale = 1;
1432  unsigned ImmIdx = 0;
1433  switch (AddrMode) {
1434  case ARMII::AddrModeT2_i8:
1435  case ARMII::AddrModeT2_i12:
1436    // i8 supports only negative, and i12 supports only positive, so
1437    // based on Offset sign, consider the appropriate instruction
1438    InstrOffs = MI->getOperand(Idx+1).getImm();
1439    Scale = 1;
1440    break;
1441  case ARMII::AddrMode5: {
1442    // VFP address mode.
1443    const MachineOperand &OffOp = MI->getOperand(Idx+1);
1444    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
1445    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
1446      InstrOffs = -InstrOffs;
1447    Scale = 4;
1448    break;
1449  }
1450  case ARMII::AddrMode2: {
1451    ImmIdx = Idx+2;
1452    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
1453    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1454      InstrOffs = -InstrOffs;
1455    break;
1456  }
1457  case ARMII::AddrMode3: {
1458    ImmIdx = Idx+2;
1459    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
1460    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1461      InstrOffs = -InstrOffs;
1462    break;
1463  }
1464  case ARMII::AddrModeT1_s: {
1465    ImmIdx = Idx+1;
1466    InstrOffs = MI->getOperand(ImmIdx).getImm();
1467    Scale = 4;
1468    break;
1469  }
1470  default:
1471    llvm_unreachable("Unsupported addressing mode!");
1472    break;
1473  }
1474
1475  return InstrOffs * Scale;
1476}
1477
/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  // Sanity-check that MI actually carries a frame index operand.
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDR: case ARM::LDRH: case ARM::LDRB:
  case ARM::STR: case ARM::STRH: case ARM::STRB:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    if (ForceAllBaseRegAlloc)
      return true;
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer,
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
  // NOTE(review): this condition is equivalent to !isThumb1OnlyFunction()
  // assuming Thumb1-only implies Thumb — confirm before simplifying.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relating to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset = -Offset;
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
  if (hasFP(MF) &&
      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, Offset))
    return false;

  // The offset likely isn't legal, we want to allocate a virtual base register.
  return true;
}
1562
/// materializeFrameBaseRegister - Insert defining instruction(s) for
/// BaseReg to be a pointer to FrameIdx before insertion point I.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock::iterator I, unsigned BaseReg,
                             int FrameIdx, int64_t Offset) const {
  ARMFunctionInfo *AFI =
    I->getParent()->getParent()->getInfo<ARMFunctionInfo>();
  // Pick the add-immediate opcode for the current instruction set:
  // ARM -> ADDri, Thumb1 -> tADDrSPi, Thumb2 -> t2ADDri.
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDrSPi : ARM::t2ADDri);

  // BaseReg = FrameIdx + Offset
  MachineInstrBuilder MIB =
    BuildMI(*I->getParent(), I, I->getDebugLoc(), TII.get(ADDriOpc), BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);
  // Thumb1 tADDrSPi takes no predicate or cc_out operands.
  if (!AFI->isThumb1OnlyFunction())
    AddDefaultCC(AddDefaultPred(MIB));
}
1579
/// resolveFrameIndex - Rewrite MI's frame index operand to use BaseReg plus
/// Offset. Not supported for Thumb1; the rewrite must fully succeed.
void
ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
                                       unsigned BaseReg, int64_t Offset) const {
  MachineInstr &MI = *I;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  // Locate the frame index operand.
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  // Unlike eliminateFrameIndex, there is no fallback path here.
  assert (Done && "Unable to resolve frame index!");
}
1606
/// isFrameOffsetLegal - Return true if Offset (combined with the offset
/// already encoded in MI) can be encoded in MI's addressing mode.
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             int64_t Offset) const {
  const TargetInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  // Locate the frame index operand.
  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  // Immediate field width, scale factor, and whether negative offsets are
  // representable, per addressing mode.
  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = 5;
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
    break;
  }

  // Combine with the offset already encoded in the instruction.
  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  // For signed modes the magnitude is what must fit in the field.
  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}
1674
/// eliminateFrameIndex - Replace MI's frame index operand with a concrete
/// base register and fold as much of the resolved offset as the encoding
/// allows; any residue is materialized into a scratch virtual register.
void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, RegScavenger *RS) const {
  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");

  // Locate the frame index operand.
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned FrameReg;

  // Resolve to a base register (SP/FP/base pointer) plus offset.
  int Offset = ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(i).  ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(i+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  // Preserve the instruction's predicate on any new instructions.
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
  else {
    // Materialize FrameReg+Offset into a scratch virtual register and use it.
    ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
  }
}
1743
1744/// Move iterator past the next bunch of callee save load / store ops for
1745/// the particular spill area (1: integer area 1, 2: integer area 2,
1746/// 3: fp area, 0: don't care).
1747static void movePastCSLoadStoreOps(MachineBasicBlock &MBB,
1748                                   MachineBasicBlock::iterator &MBBI,
1749                                   int Opc1, int Opc2, unsigned Area,
1750                                   const ARMSubtarget &STI) {
1751  while (MBBI != MBB.end() &&
1752         ((MBBI->getOpcode() == Opc1) || (MBBI->getOpcode() == Opc2)) &&
1753         MBBI->getOperand(1).isFI()) {
1754    if (Area != 0) {
1755      bool Done = false;
1756      unsigned Category = 0;
1757      switch (MBBI->getOperand(0).getReg()) {
1758      case ARM::R4:  case ARM::R5:  case ARM::R6: case ARM::R7:
1759      case ARM::LR:
1760        Category = 1;
1761        break;
1762      case ARM::R8:  case ARM::R9:  case ARM::R10: case ARM::R11:
1763        Category = STI.isTargetDarwin() ? 2 : 1;
1764        break;
1765      case ARM::D8:  case ARM::D9:  case ARM::D10: case ARM::D11:
1766      case ARM::D12: case ARM::D13: case ARM::D14: case ARM::D15:
1767        Category = 3;
1768        break;
1769      default:
1770        Done = true;
1771        break;
1772      }
1773      if (Done || Category != Area)
1774        break;
1775    }
1776
1777    ++MBBI;
1778  }
1779}
1780
/// emitPrologue - Insert prologue code into the function's entry block:
/// allocate the vararg register save area, size and lay out the three
/// callee-save spill areas (GPR area 1, GPR area 2 on Darwin, DPR area),
/// establish the frame pointer and base pointer when needed, allocate the
/// remaining locals, and dynamically realign the stack if required.
void ARMBaseRegisterInfo::
emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo  *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This emitPrologue does not support Thumb1!");
  bool isARM = !AFI->isThumbFunction();
  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  unsigned NumBytes = MFI->getStackSize();
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Determine the sizes of each callee-save spill areas and record which frame
  // belongs to which callee-save spill areas.
  unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
  int FramePtrSpillFI = 0;

  // Allocate the vararg register save area. This is not counted in NumBytes.
  if (VARegSaveSize)
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, -VARegSaveSize);

  // No spills needed: just allocate the locals (if any) and we're done.
  if (!AFI->hasStackFrame()) {
    if (NumBytes != 0)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
    return;
  }

  // Assign each callee-saved register's frame index to a spill area and
  // accumulate the area sizes (4 bytes per GPR, 8 per DPR).
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    int FI = CSI[i].getFrameIdx();
    switch (Reg) {
    case ARM::R4:
    case ARM::R5:
    case ARM::R6:
    case ARM::R7:
    case ARM::LR:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      AFI->addGPRCalleeSavedArea1Frame(FI);
      GPRCS1Size += 4;
      break;
    case ARM::R8:
    case ARM::R9:
    case ARM::R10:
    case ARM::R11:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      // R8-R11 get their own spill area only on Darwin.
      if (STI.isTargetDarwin()) {
        AFI->addGPRCalleeSavedArea2Frame(FI);
        GPRCS2Size += 4;
      } else {
        AFI->addGPRCalleeSavedArea1Frame(FI);
        GPRCS1Size += 4;
      }
      break;
    default:
      AFI->addDPRCalleeSavedAreaFrame(FI);
      DPRCSSize += 8;
    }
  }

  // Build the new SUBri to adjust SP for integer callee-save spill area 1.
  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS1Size);
  movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 1, STI);

  // Set FP to point to the stack slot that contains the previous FP.
  // For Darwin, FP is R7, which has now been stored in spill area 1.
  // Otherwise, if this is not Darwin, all the callee-saved registers go
  // into spill area 1, including the FP in R11.  In either case, it is
  // now safe to emit this assignment.
  bool HasFP = hasFP(MF);
  if (HasFP) {
    unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri : ARM::t2ADDri;
    MachineInstrBuilder MIB =
      BuildMI(MBB, MBBI, dl, TII.get(ADDriOpc), FramePtr)
      .addFrameIndex(FramePtrSpillFI).addImm(0);
    AddDefaultCC(AddDefaultPred(MIB));
  }

  // Build the new SUBri to adjust SP for integer callee-save spill area 2.
  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS2Size);

  // Build the new SUBri to adjust SP for FP callee-save spill area.
  movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 2, STI);
  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRCSSize);

  // Determine starting offsets of spill areas (laid out from the bottom of
  // the frame upward: DPR area, then GPR area 2, then GPR area 1).
  unsigned DPRCSOffset  = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
  unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
  unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
  if (HasFP)
    AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) +
                                NumBytes);
  AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
  AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
  AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);

  // Skip past the DPR spills, then allocate the rest of the frame.
  movePastCSLoadStoreOps(MBB, MBBI, ARM::VSTRD, 0, 3, STI);
  NumBytes = DPRCSOffset;
  if (NumBytes) {
    // Adjust SP after all the callee-save spills.
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
    if (HasFP)
      AFI->setShouldRestoreSPFromFP(true);
  }

  if (STI.isTargetELF() && hasFP(MF)) {
    MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
                             AFI->getFramePtrSpillOffset());
    AFI->setShouldRestoreSPFromFP(true);
  }

  AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
  AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
  AFI->setDPRCalleeSavedAreaSize(DPRCSSize);

  // If we need dynamic stack realignment, do it here. Be paranoid and make
  // sure if we also have VLAs, we have a base pointer for frame access.
  if (needsStackRealignment(MF)) {
    unsigned MaxAlign = MFI->getMaxAlignment();
    assert (!AFI->isThumb1OnlyFunction());
    if (!AFI->isThumbFunction()) {
      // Emit bic sp, sp, MaxAlign
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
                                          TII.get(ARM::BICri), ARM::SP)
                                  .addReg(ARM::SP, RegState::Kill)
                                  .addImm(MaxAlign-1)));
    } else {
      // We cannot use sp as source/dest register here, thus we're emitting the
      // following sequence:
      // mov r4, sp
      // bic r4, r4, MaxAlign
      // mov sp, r4
      // FIXME: It will be better just to find spare register here.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2tgpr), ARM::R4)
        .addReg(ARM::SP, RegState::Kill);
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
                                          TII.get(ARM::t2BICri), ARM::R4)
                                  .addReg(ARM::R4, RegState::Kill)
                                  .addImm(MaxAlign-1)));
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
        .addReg(ARM::R4, RegState::Kill);
    }

    AFI->setShouldRestoreSPFromFP(true);
  }

  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  if (hasBasePointer(MF)) {
    if (isARM)
      BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), BasePtr)
        .addReg(ARM::SP)
        .addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
    else
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), BasePtr)
        .addReg(ARM::SP);
  }

  // If the frame has variable sized objects then the epilogue must restore
  // the sp from fp.
  if (!AFI->shouldRestoreSPFromFP() && MFI->hasVarSizedObjects())
    AFI->setShouldRestoreSPFromFP(true);
}
1949
/// isCalleeSavedRegister - Return true if Reg appears in the
/// zero-terminated register list CSRegs.
static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
  while (*CSRegs)
    if (*CSRegs++ == Reg)
      return true;
  return false;
}
1956
1957static bool isCSRestore(MachineInstr *MI,
1958                        const ARMBaseInstrInfo &TII,
1959                        const unsigned *CSRegs) {
1960  return ((MI->getOpcode() == (int)ARM::VLDRD ||
1961           MI->getOpcode() == (int)ARM::LDR ||
1962           MI->getOpcode() == (int)ARM::t2LDRi12) &&
1963          MI->getOperand(1).isFI() &&
1964          isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs));
1965}
1966
/// emitEpilogue - Insert epilogue code before the return instruction in MBB:
/// walk SP back up through the three callee-save spill areas (restoring it
/// from FP when required), expand TCRETURN pseudos into tail-call jumps, and
/// deallocate the vararg register save area.  Not usable for Thumb1.
void ARMBaseRegisterInfo::
emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  assert(MBBI->getDesc().isReturn() &&
         "Can only insert epilog into returning blocks");
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc dl = MBBI->getDebugLoc();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This emitEpilogue does not support Thumb1!");
  bool isARM = !AFI->isThumbFunction();

  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  int NumBytes = (int)MFI->getStackSize();

  if (!AFI->hasStackFrame()) {
    // No callee-save restores: just deallocate the locals.
    if (NumBytes != 0)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
  } else {
    // Unwind MBBI to point to first LDR / VLDRD so the SP adjustments
    // below are inserted before the callee-save reloads.
    const unsigned *CSRegs = getCalleeSavedRegs();
    if (MBBI != MBB.begin()) {
      do
        --MBBI;
      while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
      if (!isCSRestore(MBBI, TII, CSRegs))
        ++MBBI;
    }

    // Move SP to start of FP callee save spill area.
    NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
                 AFI->getGPRCalleeSavedArea2Size() +
                 AFI->getDPRCalleeSavedAreaSize());

    // Reset SP based on frame pointer only if the stack frame extends beyond
    // frame pointer stack slot or target is ELF and the function has FP.
    if (AFI->shouldRestoreSPFromFP()) {
      NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
      if (NumBytes) {
        // SP = FP - NumBytes.
        if (isARM)
          emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
                                  ARMCC::AL, 0, TII);
        else
          emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
                                 ARMCC::AL, 0, TII);
      } else {
        // Thumb2 or ARM: plain SP = FP copy.
        if (isARM)
          BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
            .addReg(FramePtr).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
        else
          BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP)
            .addReg(FramePtr);
      }
    } else if (NumBytes)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);

    // Move SP to start of integer callee save spill area 2 (i.e. skip past
    // the DPR reloads, then pop the DPR area).
    movePastCSLoadStoreOps(MBB, MBBI, ARM::VLDRD, 0, 3, STI);
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getDPRCalleeSavedAreaSize());

    // Move SP to start of integer callee save spill area 1.
    movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 2, STI);
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea2Size());

    // Move SP to SP upon entry to the function.
    movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 1, STI);
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea1Size());
  }

  // Expand the TCRETURN pseudo-instruction into an actual tail-call jump.
  if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNdiND ||
      RetOpcode == ARM::TCRETURNri || RetOpcode == ARM::TCRETURNriND) {
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);

    // Jump to label or value in register.
    if (RetOpcode == ARM::TCRETURNdi) {
      BuildMI(MBB, MBBI, dl,
            TII.get(STI.isThumb() ? ARM::TAILJMPdt : ARM::TAILJMPd)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == ARM::TCRETURNdiND) {
      BuildMI(MBB, MBBI, dl,
            TII.get(STI.isThumb() ? ARM::TAILJMPdNDt : ARM::TAILJMPdND)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == ARM::TCRETURNri) {
      BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else if (RetOpcode == ARM::TCRETURNriND) {
      BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPrND)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    // Copy the pseudo's remaining operands (e.g. implicit uses) onto the
    // newly built tail-jump instruction.
    MachineInstr *NewMI = prior(MBBI);
    for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
      NewMI->addOperand(MBBI->getOperand(i));

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  }

  // Deallocate the vararg register save area set up in the prologue.
  if (VARegSaveSize)
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, VARegSaveSize);
}
2074
2075#include "ARMGenRegisterInfo.inc"
2076