ARMBaseRegisterInfo.cpp revision 68eec39bca280f98bef1256a5e89531ac1a77d1a
1//===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file contains the base ARM implementation of TargetRegisterInfo class. 11// 12//===----------------------------------------------------------------------===// 13 14#include "ARM.h" 15#include "ARMAddressingModes.h" 16#include "ARMBaseInstrInfo.h" 17#include "ARMBaseRegisterInfo.h" 18#include "ARMInstrInfo.h" 19#include "ARMMachineFunctionInfo.h" 20#include "ARMSubtarget.h" 21#include "llvm/Constants.h" 22#include "llvm/DerivedTypes.h" 23#include "llvm/Function.h" 24#include "llvm/LLVMContext.h" 25#include "llvm/CodeGen/MachineConstantPool.h" 26#include "llvm/CodeGen/MachineFrameInfo.h" 27#include "llvm/CodeGen/MachineFunction.h" 28#include "llvm/CodeGen/MachineInstrBuilder.h" 29#include "llvm/CodeGen/MachineLocation.h" 30#include "llvm/CodeGen/MachineRegisterInfo.h" 31#include "llvm/CodeGen/RegisterScavenging.h" 32#include "llvm/Support/Debug.h" 33#include "llvm/Support/ErrorHandling.h" 34#include "llvm/Support/raw_ostream.h" 35#include "llvm/Target/TargetFrameInfo.h" 36#include "llvm/Target/TargetMachine.h" 37#include "llvm/Target/TargetOptions.h" 38#include "llvm/ADT/BitVector.h" 39#include "llvm/ADT/SmallVector.h" 40#include "llvm/Support/CommandLine.h" 41 42namespace llvm { 43cl::opt<bool> 44ReuseFrameIndexVals("arm-reuse-frame-index-vals", cl::Hidden, cl::init(true), 45 cl::desc("Reuse repeated frame index values")); 46} 47 48using namespace llvm; 49 50unsigned ARMBaseRegisterInfo::getRegisterNumbering(unsigned RegEnum, 51 bool *isSPVFP) { 52 if (isSPVFP) 53 *isSPVFP = false; 54 55 using namespace ARM; 56 switch (RegEnum) { 57 default: 58 llvm_unreachable("Unknown ARM register!"); 59 case R0: case D0: case Q0: return 0; 60 
case R1: case D1: case Q1: return 1; 61 case R2: case D2: case Q2: return 2; 62 case R3: case D3: case Q3: return 3; 63 case R4: case D4: case Q4: return 4; 64 case R5: case D5: case Q5: return 5; 65 case R6: case D6: case Q6: return 6; 66 case R7: case D7: case Q7: return 7; 67 case R8: case D8: case Q8: return 8; 68 case R9: case D9: case Q9: return 9; 69 case R10: case D10: case Q10: return 10; 70 case R11: case D11: case Q11: return 11; 71 case R12: case D12: case Q12: return 12; 72 case SP: case D13: case Q13: return 13; 73 case LR: case D14: case Q14: return 14; 74 case PC: case D15: case Q15: return 15; 75 76 case D16: return 16; 77 case D17: return 17; 78 case D18: return 18; 79 case D19: return 19; 80 case D20: return 20; 81 case D21: return 21; 82 case D22: return 22; 83 case D23: return 23; 84 case D24: return 24; 85 case D25: return 25; 86 case D26: return 26; 87 case D27: return 27; 88 case D28: return 28; 89 case D29: return 29; 90 case D30: return 30; 91 case D31: return 31; 92 93 case S0: case S1: case S2: case S3: 94 case S4: case S5: case S6: case S7: 95 case S8: case S9: case S10: case S11: 96 case S12: case S13: case S14: case S15: 97 case S16: case S17: case S18: case S19: 98 case S20: case S21: case S22: case S23: 99 case S24: case S25: case S26: case S27: 100 case S28: case S29: case S30: case S31: { 101 if (isSPVFP) 102 *isSPVFP = true; 103 switch (RegEnum) { 104 default: return 0; // Avoid compile time warning. 
105 case S0: return 0; 106 case S1: return 1; 107 case S2: return 2; 108 case S3: return 3; 109 case S4: return 4; 110 case S5: return 5; 111 case S6: return 6; 112 case S7: return 7; 113 case S8: return 8; 114 case S9: return 9; 115 case S10: return 10; 116 case S11: return 11; 117 case S12: return 12; 118 case S13: return 13; 119 case S14: return 14; 120 case S15: return 15; 121 case S16: return 16; 122 case S17: return 17; 123 case S18: return 18; 124 case S19: return 19; 125 case S20: return 20; 126 case S21: return 21; 127 case S22: return 22; 128 case S23: return 23; 129 case S24: return 24; 130 case S25: return 25; 131 case S26: return 26; 132 case S27: return 27; 133 case S28: return 28; 134 case S29: return 29; 135 case S30: return 30; 136 case S31: return 31; 137 } 138 } 139 } 140} 141 142ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii, 143 const ARMSubtarget &sti) 144 : ARMGenRegisterInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP), 145 TII(tii), STI(sti), 146 FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11) { 147} 148 149const unsigned* 150ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { 151 static const unsigned CalleeSavedRegs[] = { 152 ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8, 153 ARM::R7, ARM::R6, ARM::R5, ARM::R4, 154 155 ARM::D15, ARM::D14, ARM::D13, ARM::D12, 156 ARM::D11, ARM::D10, ARM::D9, ARM::D8, 157 0 158 }; 159 160 static const unsigned DarwinCalleeSavedRegs[] = { 161 // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved 162 // register. 163 ARM::LR, ARM::R7, ARM::R6, ARM::R5, ARM::R4, 164 ARM::R11, ARM::R10, ARM::R8, 165 166 ARM::D15, ARM::D14, ARM::D13, ARM::D12, 167 ARM::D11, ARM::D10, ARM::D9, ARM::D8, 168 0 169 }; 170 return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs; 171} 172 173BitVector ARMBaseRegisterInfo:: 174getReservedRegs(const MachineFunction &MF) const { 175 // FIXME: avoid re-calculating this everytime. 
176 BitVector Reserved(getNumRegs()); 177 Reserved.set(ARM::SP); 178 Reserved.set(ARM::PC); 179 if (STI.isTargetDarwin() || hasFP(MF)) 180 Reserved.set(FramePtr); 181 // Some targets reserve R9. 182 if (STI.isR9Reserved()) 183 Reserved.set(ARM::R9); 184 return Reserved; 185} 186 187bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF, 188 unsigned Reg) const { 189 switch (Reg) { 190 default: break; 191 case ARM::SP: 192 case ARM::PC: 193 return true; 194 case ARM::R7: 195 case ARM::R11: 196 if (FramePtr == Reg && (STI.isTargetDarwin() || hasFP(MF))) 197 return true; 198 break; 199 case ARM::R9: 200 return STI.isR9Reserved(); 201 } 202 203 return false; 204} 205 206const TargetRegisterClass * 207ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A, 208 const TargetRegisterClass *B, 209 unsigned SubIdx) const { 210 switch (SubIdx) { 211 default: return 0; 212 case ARM::ssub_0: 213 case ARM::ssub_1: 214 case ARM::ssub_2: 215 case ARM::ssub_3: { 216 // S sub-registers. 217 if (A->getSize() == 8) { 218 if (B == &ARM::SPR_8RegClass) 219 return &ARM::DPR_8RegClass; 220 assert(B == &ARM::SPRRegClass && "Expecting SPR register class!"); 221 if (A == &ARM::DPR_8RegClass) 222 return A; 223 return &ARM::DPR_VFP2RegClass; 224 } 225 226 if (A->getSize() == 16) { 227 if (B == &ARM::SPR_8RegClass) 228 return &ARM::QPR_8RegClass; 229 return &ARM::QPR_VFP2RegClass; 230 } 231 232 if (A->getSize() == 32) { 233 if (B == &ARM::SPR_8RegClass) 234 return 0; // Do not allow coalescing! 235 return &ARM::QQPR_VFP2RegClass; 236 } 237 238 assert(A->getSize() == 64 && "Expecting a QQQQ register class!"); 239 return 0; // Do not allow coalescing! 240 } 241 case ARM::dsub_0: 242 case ARM::dsub_1: 243 case ARM::dsub_2: 244 case ARM::dsub_3: { 245 // D sub-registers. 246 if (A->getSize() == 16) { 247 if (B == &ARM::DPR_VFP2RegClass) 248 return &ARM::QPR_VFP2RegClass; 249 if (B == &ARM::DPR_8RegClass) 250 return 0; // Do not allow coalescing! 
251 return A; 252 } 253 254 if (A->getSize() == 32) { 255 if (B == &ARM::DPR_VFP2RegClass) 256 return &ARM::QQPR_VFP2RegClass; 257 if (B == &ARM::DPR_8RegClass) 258 return 0; // Do not allow coalescing! 259 return A; 260 } 261 262 assert(A->getSize() == 64 && "Expecting a QQQQ register class!"); 263 if (B != &ARM::DPRRegClass) 264 return 0; // Do not allow coalescing! 265 return A; 266 } 267 case ARM::dsub_4: 268 case ARM::dsub_5: 269 case ARM::dsub_6: 270 case ARM::dsub_7: { 271 // D sub-registers of QQQQ registers. 272 if (A->getSize() == 64 && B == &ARM::DPRRegClass) 273 return A; 274 return 0; // Do not allow coalescing! 275 } 276 277 case ARM::qsub_0: 278 case ARM::qsub_1: { 279 // Q sub-registers. 280 if (A->getSize() == 32) { 281 if (B == &ARM::QPR_VFP2RegClass) 282 return &ARM::QQPR_VFP2RegClass; 283 if (B == &ARM::QPR_8RegClass) 284 return 0; // Do not allow coalescing! 285 return A; 286 } 287 288 assert(A->getSize() == 64 && "Expecting a QQQQ register class!"); 289 if (B == &ARM::QPRRegClass) 290 return A; 291 return 0; // Do not allow coalescing! 292 } 293 case ARM::qsub_2: 294 case ARM::qsub_3: { 295 // Q sub-registers of QQQQ registers. 296 if (A->getSize() == 64 && B == &ARM::QPRRegClass) 297 return A; 298 return 0; // Do not allow coalescing! 299 } 300 } 301 return 0; 302} 303 304bool 305ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC, 306 SmallVectorImpl<unsigned> &SubIndices, 307 unsigned &NewSubIdx) const { 308 309 unsigned Size = RC->getSize() * 8; 310 if (Size < 6) 311 return 0; 312 313 NewSubIdx = 0; // Whole register. 314 unsigned NumRegs = SubIndices.size(); 315 if (NumRegs == 8) { 316 // 8 D registers -> 1 QQQQ register. 
317 return (Size == 512 && 318 SubIndices[0] == ARM::dsub_0 && 319 SubIndices[1] == ARM::dsub_1 && 320 SubIndices[2] == ARM::dsub_2 && 321 SubIndices[3] == ARM::dsub_3 && 322 SubIndices[4] == ARM::dsub_4 && 323 SubIndices[5] == ARM::dsub_5 && 324 SubIndices[6] == ARM::dsub_6 && 325 SubIndices[7] == ARM::dsub_7); 326 } else if (NumRegs == 4) { 327 if (SubIndices[0] == ARM::qsub_0) { 328 // 4 Q registers -> 1 QQQQ register. 329 return (Size == 512 && 330 SubIndices[1] == ARM::qsub_1 && 331 SubIndices[2] == ARM::qsub_2 && 332 SubIndices[3] == ARM::qsub_3); 333 } else if (SubIndices[0] == ARM::dsub_0) { 334 // 4 D registers -> 1 QQ register. 335 if (Size >= 256 && 336 SubIndices[1] == ARM::dsub_1 && 337 SubIndices[2] == ARM::dsub_2 && 338 SubIndices[3] == ARM::dsub_3) { 339 if (Size == 512) 340 NewSubIdx = ARM::qqsub_0; 341 return true; 342 } 343 } else if (SubIndices[0] == ARM::dsub_4) { 344 // 4 D registers -> 1 QQ register (2nd). 345 if (Size == 512 && 346 SubIndices[1] == ARM::dsub_5 && 347 SubIndices[2] == ARM::dsub_6 && 348 SubIndices[3] == ARM::dsub_7) { 349 NewSubIdx = ARM::qqsub_1; 350 return true; 351 } 352 } else if (SubIndices[0] == ARM::ssub_0) { 353 // 4 S registers -> 1 Q register. 354 if (Size >= 128 && 355 SubIndices[1] == ARM::ssub_1 && 356 SubIndices[2] == ARM::ssub_2 && 357 SubIndices[3] == ARM::ssub_3) { 358 if (Size >= 256) 359 NewSubIdx = ARM::qsub_0; 360 return true; 361 } 362 } 363 } else if (NumRegs == 2) { 364 if (SubIndices[0] == ARM::qsub_0) { 365 // 2 Q registers -> 1 QQ register. 366 if (Size >= 256 && SubIndices[1] == ARM::qsub_1) { 367 if (Size == 512) 368 NewSubIdx = ARM::qqsub_0; 369 return true; 370 } 371 } else if (SubIndices[0] == ARM::qsub_2) { 372 // 2 Q registers -> 1 QQ register (2nd). 373 if (Size == 512 && SubIndices[1] == ARM::qsub_3) { 374 NewSubIdx = ARM::qqsub_1; 375 return true; 376 } 377 } else if (SubIndices[0] == ARM::dsub_0) { 378 // 2 D registers -> 1 Q register. 
379 if (Size >= 128 && SubIndices[1] == ARM::dsub_1) { 380 if (Size >= 256) 381 NewSubIdx = ARM::qsub_0; 382 return true; 383 } 384 } else if (SubIndices[0] == ARM::dsub_2) { 385 // 2 D registers -> 1 Q register (2nd). 386 if (Size >= 256 && SubIndices[1] == ARM::dsub_3) { 387 NewSubIdx = ARM::qsub_1; 388 return true; 389 } 390 } else if (SubIndices[0] == ARM::dsub_4) { 391 // 2 D registers -> 1 Q register (3rd). 392 if (Size == 512 && SubIndices[1] == ARM::dsub_5) { 393 NewSubIdx = ARM::qsub_2; 394 return true; 395 } 396 } else if (SubIndices[0] == ARM::dsub_6) { 397 // 2 D registers -> 1 Q register (3rd). 398 if (Size == 512 && SubIndices[1] == ARM::dsub_7) { 399 NewSubIdx = ARM::qsub_3; 400 return true; 401 } 402 } else if (SubIndices[0] == ARM::ssub_0) { 403 // 2 S registers -> 1 D register. 404 if (SubIndices[1] == ARM::ssub_1) { 405 if (Size >= 128) 406 NewSubIdx = ARM::dsub_0; 407 return true; 408 } 409 } else if (SubIndices[0] == ARM::ssub_2) { 410 // 2 S registers -> 1 D register (2nd). 411 if (Size >= 128 && SubIndices[1] == ARM::ssub_3) { 412 NewSubIdx = ARM::dsub_1; 413 return true; 414 } 415 } 416 } 417 return false; 418} 419 420 421const TargetRegisterClass * 422ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const { 423 return ARM::GPRRegisterClass; 424} 425 426/// getAllocationOrder - Returns the register allocation order for a specified 427/// register class in the form of a pair of TargetRegisterClass iterators. 428std::pair<TargetRegisterClass::iterator,TargetRegisterClass::iterator> 429ARMBaseRegisterInfo::getAllocationOrder(const TargetRegisterClass *RC, 430 unsigned HintType, unsigned HintReg, 431 const MachineFunction &MF) const { 432 // Alternative register allocation orders when favoring even / odd registers 433 // of register pairs. 434 435 // No FP, R9 is available. 
436 static const unsigned GPREven1[] = { 437 ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10, 438 ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, 439 ARM::R9, ARM::R11 440 }; 441 static const unsigned GPROdd1[] = { 442 ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11, 443 ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, 444 ARM::R8, ARM::R10 445 }; 446 447 // FP is R7, R9 is available. 448 static const unsigned GPREven2[] = { 449 ARM::R0, ARM::R2, ARM::R4, ARM::R8, ARM::R10, 450 ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, 451 ARM::R9, ARM::R11 452 }; 453 static const unsigned GPROdd2[] = { 454 ARM::R1, ARM::R3, ARM::R5, ARM::R9, ARM::R11, 455 ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, 456 ARM::R8, ARM::R10 457 }; 458 459 // FP is R11, R9 is available. 460 static const unsigned GPREven3[] = { 461 ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, 462 ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, 463 ARM::R9 464 }; 465 static const unsigned GPROdd3[] = { 466 ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9, 467 ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7, 468 ARM::R8 469 }; 470 471 // No FP, R9 is not available. 472 static const unsigned GPREven4[] = { 473 ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R10, 474 ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8, 475 ARM::R11 476 }; 477 static const unsigned GPROdd4[] = { 478 ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R11, 479 ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8, 480 ARM::R10 481 }; 482 483 // FP is R7, R9 is not available. 484 static const unsigned GPREven5[] = { 485 ARM::R0, ARM::R2, ARM::R4, ARM::R10, 486 ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8, 487 ARM::R11 488 }; 489 static const unsigned GPROdd5[] = { 490 ARM::R1, ARM::R3, ARM::R5, ARM::R11, 491 ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8, 492 ARM::R10 493 }; 494 495 // FP is R11, R9 is not available. 
496 static const unsigned GPREven6[] = { 497 ARM::R0, ARM::R2, ARM::R4, ARM::R6, 498 ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8 499 }; 500 static const unsigned GPROdd6[] = { 501 ARM::R1, ARM::R3, ARM::R5, ARM::R7, 502 ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8 503 }; 504 505 506 if (HintType == ARMRI::RegPairEven) { 507 if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0) 508 // It's no longer possible to fulfill this hint. Return the default 509 // allocation order. 510 return std::make_pair(RC->allocation_order_begin(MF), 511 RC->allocation_order_end(MF)); 512 513 if (!STI.isTargetDarwin() && !hasFP(MF)) { 514 if (!STI.isR9Reserved()) 515 return std::make_pair(GPREven1, 516 GPREven1 + (sizeof(GPREven1)/sizeof(unsigned))); 517 else 518 return std::make_pair(GPREven4, 519 GPREven4 + (sizeof(GPREven4)/sizeof(unsigned))); 520 } else if (FramePtr == ARM::R7) { 521 if (!STI.isR9Reserved()) 522 return std::make_pair(GPREven2, 523 GPREven2 + (sizeof(GPREven2)/sizeof(unsigned))); 524 else 525 return std::make_pair(GPREven5, 526 GPREven5 + (sizeof(GPREven5)/sizeof(unsigned))); 527 } else { // FramePtr == ARM::R11 528 if (!STI.isR9Reserved()) 529 return std::make_pair(GPREven3, 530 GPREven3 + (sizeof(GPREven3)/sizeof(unsigned))); 531 else 532 return std::make_pair(GPREven6, 533 GPREven6 + (sizeof(GPREven6)/sizeof(unsigned))); 534 } 535 } else if (HintType == ARMRI::RegPairOdd) { 536 if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0) 537 // It's no longer possible to fulfill this hint. Return the default 538 // allocation order. 
539 return std::make_pair(RC->allocation_order_begin(MF), 540 RC->allocation_order_end(MF)); 541 542 if (!STI.isTargetDarwin() && !hasFP(MF)) { 543 if (!STI.isR9Reserved()) 544 return std::make_pair(GPROdd1, 545 GPROdd1 + (sizeof(GPROdd1)/sizeof(unsigned))); 546 else 547 return std::make_pair(GPROdd4, 548 GPROdd4 + (sizeof(GPROdd4)/sizeof(unsigned))); 549 } else if (FramePtr == ARM::R7) { 550 if (!STI.isR9Reserved()) 551 return std::make_pair(GPROdd2, 552 GPROdd2 + (sizeof(GPROdd2)/sizeof(unsigned))); 553 else 554 return std::make_pair(GPROdd5, 555 GPROdd5 + (sizeof(GPROdd5)/sizeof(unsigned))); 556 } else { // FramePtr == ARM::R11 557 if (!STI.isR9Reserved()) 558 return std::make_pair(GPROdd3, 559 GPROdd3 + (sizeof(GPROdd3)/sizeof(unsigned))); 560 else 561 return std::make_pair(GPROdd6, 562 GPROdd6 + (sizeof(GPROdd6)/sizeof(unsigned))); 563 } 564 } 565 return std::make_pair(RC->allocation_order_begin(MF), 566 RC->allocation_order_end(MF)); 567} 568 569/// ResolveRegAllocHint - Resolves the specified register allocation hint 570/// to a physical register. Returns the physical register if it is successful. 571unsigned 572ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg, 573 const MachineFunction &MF) const { 574 if (Reg == 0 || !isPhysicalRegister(Reg)) 575 return 0; 576 if (Type == 0) 577 return Reg; 578 else if (Type == (unsigned)ARMRI::RegPairOdd) 579 // Odd register. 580 return getRegisterPairOdd(Reg, MF); 581 else if (Type == (unsigned)ARMRI::RegPairEven) 582 // Even register. 
583 return getRegisterPairEven(Reg, MF); 584 return 0; 585} 586 587void 588ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg, 589 MachineFunction &MF) const { 590 MachineRegisterInfo *MRI = &MF.getRegInfo(); 591 std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg); 592 if ((Hint.first == (unsigned)ARMRI::RegPairOdd || 593 Hint.first == (unsigned)ARMRI::RegPairEven) && 594 Hint.second && TargetRegisterInfo::isVirtualRegister(Hint.second)) { 595 // If 'Reg' is one of the even / odd register pair and it's now changed 596 // (e.g. coalesced) into a different register. The other register of the 597 // pair allocation hint must be updated to reflect the relationship 598 // change. 599 unsigned OtherReg = Hint.second; 600 Hint = MRI->getRegAllocationHint(OtherReg); 601 if (Hint.second == Reg) 602 // Make sure the pair has not already divorced. 603 MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg); 604 } 605} 606 607/// hasFP - Return true if the specified function should have a dedicated frame 608/// pointer register. This is true if the function has variable sized allocas 609/// or if frame pointer elimination is disabled. 
610/// 611bool ARMBaseRegisterInfo::hasFP(const MachineFunction &MF) const { 612 const MachineFrameInfo *MFI = MF.getFrameInfo(); 613 return ((DisableFramePointerElim(MF) && MFI->adjustsStack())|| 614 needsStackRealignment(MF) || 615 MFI->hasVarSizedObjects() || 616 MFI->isFrameAddressTaken()); 617} 618 619bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const { 620 const MachineFrameInfo *MFI = MF.getFrameInfo(); 621 const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 622 return (RealignStack && 623 !AFI->isThumb1OnlyFunction() && 624 !MFI->hasVarSizedObjects()); 625} 626 627bool ARMBaseRegisterInfo:: 628needsStackRealignment(const MachineFunction &MF) const { 629 const MachineFrameInfo *MFI = MF.getFrameInfo(); 630 const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 631 unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment(); 632 return (RealignStack && 633 !AFI->isThumb1OnlyFunction() && 634 (MFI->getMaxAlignment() > StackAlign) && 635 !MFI->hasVarSizedObjects()); 636} 637 638bool ARMBaseRegisterInfo:: 639cannotEliminateFrame(const MachineFunction &MF) const { 640 const MachineFrameInfo *MFI = MF.getFrameInfo(); 641 if (DisableFramePointerElim(MF) && MFI->adjustsStack()) 642 return true; 643 return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken() 644 || needsStackRealignment(MF); 645} 646 647/// estimateStackSize - Estimate and return the size of the frame. 
648static unsigned estimateStackSize(MachineFunction &MF) { 649 const MachineFrameInfo *FFI = MF.getFrameInfo(); 650 int Offset = 0; 651 for (int i = FFI->getObjectIndexBegin(); i != 0; ++i) { 652 int FixedOff = -FFI->getObjectOffset(i); 653 if (FixedOff > Offset) Offset = FixedOff; 654 } 655 for (unsigned i = 0, e = FFI->getObjectIndexEnd(); i != e; ++i) { 656 if (FFI->isDeadObjectIndex(i)) 657 continue; 658 Offset += FFI->getObjectSize(i); 659 unsigned Align = FFI->getObjectAlignment(i); 660 // Adjust to alignment boundary 661 Offset = (Offset+Align-1)/Align*Align; 662 } 663 return (unsigned)Offset; 664} 665 666/// estimateRSStackSizeLimit - Look at each instruction that references stack 667/// frames and return the stack size limit beyond which some of these 668/// instructions will require a scratch register during their expansion later. 669unsigned 670ARMBaseRegisterInfo::estimateRSStackSizeLimit(MachineFunction &MF) const { 671 unsigned Limit = (1 << 12) - 1; 672 for (MachineFunction::iterator BB = MF.begin(),E = MF.end(); BB != E; ++BB) { 673 for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); 674 I != E; ++I) { 675 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 676 if (!I->getOperand(i).isFI()) continue; 677 678 // When using ADDri to get the address of a stack object, 255 is the 679 // largest offset guaranteed to fit in the immediate offset. 680 if (I->getOpcode() == ARM::ADDri) { 681 Limit = std::min(Limit, (1U << 8) - 1); 682 break; 683 } 684 685 // Otherwise check the addressing mode. 
686 switch (I->getDesc().TSFlags & ARMII::AddrModeMask) { 687 case ARMII::AddrMode3: 688 case ARMII::AddrModeT2_i8: 689 Limit = std::min(Limit, (1U << 8) - 1); 690 break; 691 case ARMII::AddrMode5: 692 case ARMII::AddrModeT2_i8s4: 693 Limit = std::min(Limit, ((1U << 8) - 1) * 4); 694 break; 695 case ARMII::AddrModeT2_i12: 696 if (hasFP(MF)) Limit = std::min(Limit, (1U << 8) - 1); 697 break; 698 case ARMII::AddrMode6: 699 // Addressing mode 6 (load/store) instructions can't encode an 700 // immediate offset for stack references. 701 return 0; 702 default: 703 break; 704 } 705 break; // At most one FI per instruction 706 } 707 } 708 } 709 710 return Limit; 711} 712 713void 714ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, 715 RegScavenger *RS) const { 716 // This tells PEI to spill the FP as if it is any other callee-save register 717 // to take advantage the eliminateFrameIndex machinery. This also ensures it 718 // is spilled in the order specified by getCalleeSavedRegs() to make it easier 719 // to combine multiple loads / stores. 720 bool CanEliminateFrame = true; 721 bool CS1Spilled = false; 722 bool LRSpilled = false; 723 unsigned NumGPRSpills = 0; 724 SmallVector<unsigned, 4> UnspilledCS1GPRs; 725 SmallVector<unsigned, 4> UnspilledCS2GPRs; 726 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 727 MachineFrameInfo *MFI = MF.getFrameInfo(); 728 729 // Spill R4 if Thumb2 function requires stack realignment - it will be used as 730 // scratch register. 731 // FIXME: It will be better just to find spare register here. 732 if (needsStackRealignment(MF) && 733 AFI->isThumb2Function()) 734 MF.getRegInfo().setPhysRegUsed(ARM::R4); 735 736 // Spill LR if Thumb1 function uses variable length argument lists. 737 if (AFI->isThumb1OnlyFunction() && AFI->getVarArgsRegSaveSize() > 0) 738 MF.getRegInfo().setPhysRegUsed(ARM::LR); 739 740 // Don't spill FP if the frame can be eliminated. 
This is determined 741 // by scanning the callee-save registers to see if any is used. 742 const unsigned *CSRegs = getCalleeSavedRegs(); 743 for (unsigned i = 0; CSRegs[i]; ++i) { 744 unsigned Reg = CSRegs[i]; 745 bool Spilled = false; 746 if (MF.getRegInfo().isPhysRegUsed(Reg)) { 747 AFI->setCSRegisterIsSpilled(Reg); 748 Spilled = true; 749 CanEliminateFrame = false; 750 } else { 751 // Check alias registers too. 752 for (const unsigned *Aliases = getAliasSet(Reg); *Aliases; ++Aliases) { 753 if (MF.getRegInfo().isPhysRegUsed(*Aliases)) { 754 Spilled = true; 755 CanEliminateFrame = false; 756 } 757 } 758 } 759 760 if (!ARM::GPRRegisterClass->contains(Reg)) 761 continue; 762 763 if (Spilled) { 764 NumGPRSpills++; 765 766 if (!STI.isTargetDarwin()) { 767 if (Reg == ARM::LR) 768 LRSpilled = true; 769 CS1Spilled = true; 770 continue; 771 } 772 773 // Keep track if LR and any of R4, R5, R6, and R7 is spilled. 774 switch (Reg) { 775 case ARM::LR: 776 LRSpilled = true; 777 // Fallthrough 778 case ARM::R4: 779 case ARM::R5: 780 case ARM::R6: 781 case ARM::R7: 782 CS1Spilled = true; 783 break; 784 default: 785 break; 786 } 787 } else { 788 if (!STI.isTargetDarwin()) { 789 UnspilledCS1GPRs.push_back(Reg); 790 continue; 791 } 792 793 switch (Reg) { 794 case ARM::R4: 795 case ARM::R5: 796 case ARM::R6: 797 case ARM::R7: 798 case ARM::LR: 799 UnspilledCS1GPRs.push_back(Reg); 800 break; 801 default: 802 UnspilledCS2GPRs.push_back(Reg); 803 break; 804 } 805 } 806 } 807 808 bool ForceLRSpill = false; 809 if (!LRSpilled && AFI->isThumb1OnlyFunction()) { 810 unsigned FnSize = TII.GetFunctionSizeInBytes(MF); 811 // Force LR to be spilled if the Thumb function size is > 2048. This enables 812 // use of BL to implement far jump. If it turns out that it's not needed 813 // then the branch fix up path will undo it. 
814 if (FnSize >= (1 << 11)) { 815 CanEliminateFrame = false; 816 ForceLRSpill = true; 817 } 818 } 819 820 // If any of the stack slot references may be out of range of an immediate 821 // offset, make sure a register (or a spill slot) is available for the 822 // register scavenger. Note that if we're indexing off the frame pointer, the 823 // effective stack size is 4 bytes larger since the FP points to the stack 824 // slot of the previous FP. Also, if we have variable sized objects in the 825 // function, stack slot references will often be negative, and some of 826 // our instructions are positive-offset only, so conservatively consider 827 // that case to want a spill slot (or register) as well. 828 // FIXME: We could add logic to be more precise about negative offsets 829 // and which instructions will need a scratch register for them. Is it 830 // worth the effort and added fragility? 831 bool BigStack = 832 (RS && (estimateStackSize(MF) + (hasFP(MF) ? 4:0) >= 833 estimateRSStackSizeLimit(MF))) || MFI->hasVarSizedObjects(); 834 835 bool ExtraCSSpill = false; 836 if (BigStack || !CanEliminateFrame || cannotEliminateFrame(MF)) { 837 AFI->setHasStackFrame(true); 838 839 // If LR is not spilled, but at least one of R4, R5, R6, and R7 is spilled. 840 // Spill LR as well so we can fold BX_RET to the registers restore (LDM). 841 if (!LRSpilled && CS1Spilled) { 842 MF.getRegInfo().setPhysRegUsed(ARM::LR); 843 AFI->setCSRegisterIsSpilled(ARM::LR); 844 NumGPRSpills++; 845 UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(), 846 UnspilledCS1GPRs.end(), (unsigned)ARM::LR)); 847 ForceLRSpill = false; 848 ExtraCSSpill = true; 849 } 850 851 // Darwin ABI requires FP to point to the stack slot that contains the 852 // previous FP. 853 if (STI.isTargetDarwin() || hasFP(MF)) { 854 MF.getRegInfo().setPhysRegUsed(FramePtr); 855 NumGPRSpills++; 856 } 857 858 // If stack and double are 8-byte aligned and we are spilling an odd number 859 // of GPRs. 
Spill one extra callee save GPR so we won't have to pad between 860 // the integer and double callee save areas. 861 unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment(); 862 if (TargetAlign == 8 && (NumGPRSpills & 1)) { 863 if (CS1Spilled && !UnspilledCS1GPRs.empty()) { 864 for (unsigned i = 0, e = UnspilledCS1GPRs.size(); i != e; ++i) { 865 unsigned Reg = UnspilledCS1GPRs[i]; 866 // Don't spill high register if the function is thumb1 867 if (!AFI->isThumb1OnlyFunction() || 868 isARMLowRegister(Reg) || Reg == ARM::LR) { 869 MF.getRegInfo().setPhysRegUsed(Reg); 870 AFI->setCSRegisterIsSpilled(Reg); 871 if (!isReservedReg(MF, Reg)) 872 ExtraCSSpill = true; 873 break; 874 } 875 } 876 } else if (!UnspilledCS2GPRs.empty() && 877 !AFI->isThumb1OnlyFunction()) { 878 unsigned Reg = UnspilledCS2GPRs.front(); 879 MF.getRegInfo().setPhysRegUsed(Reg); 880 AFI->setCSRegisterIsSpilled(Reg); 881 if (!isReservedReg(MF, Reg)) 882 ExtraCSSpill = true; 883 } 884 } 885 886 // Estimate if we might need to scavenge a register at some point in order 887 // to materialize a stack offset. If so, either spill one additional 888 // callee-saved register or reserve a special spill slot to facilitate 889 // register scavenging. Thumb1 needs a spill slot for stack pointer 890 // adjustments also, even when the frame itself is small. 891 if (BigStack && !ExtraCSSpill) { 892 // If any non-reserved CS register isn't spilled, just spill one or two 893 // extra. That should take care of it! 
      // (Continuation: the enclosing callee-saved-scan function begins above
      // this chunk.)  Try to find NumExtras additional callee-saved GPRs to
      // spill so the register scavenger has something to work with.
      unsigned NumExtras = TargetAlign / 4;
      SmallVector<unsigned, 2> Extras;
      // Prefer candidates from spill area 1.  Thumb1 can only make use of
      // low registers or LR here.
      while (NumExtras && !UnspilledCS1GPRs.empty()) {
        unsigned Reg = UnspilledCS1GPRs.back();
        UnspilledCS1GPRs.pop_back();
        if (!isReservedReg(MF, Reg) &&
            (!AFI->isThumb1OnlyFunction() || isARMLowRegister(Reg) ||
             Reg == ARM::LR)) {
          Extras.push_back(Reg);
          NumExtras--;
        }
      }
      // For non-Thumb1 functions, also check for hi-reg CS registers
      if (!AFI->isThumb1OnlyFunction()) {
        while (NumExtras && !UnspilledCS2GPRs.empty()) {
          unsigned Reg = UnspilledCS2GPRs.back();
          UnspilledCS2GPRs.pop_back();
          if (!isReservedReg(MF, Reg)) {
            Extras.push_back(Reg);
            NumExtras--;
          }
        }
      }
      // Only commit the extra spills if we found the full complement we
      // asked for; otherwise fall back to a dedicated scavenging slot.
      if (Extras.size() && NumExtras == 0) {
        for (unsigned i = 0, e = Extras.size(); i != e; ++i) {
          MF.getRegInfo().setPhysRegUsed(Extras[i]);
          AFI->setCSRegisterIsSpilled(Extras[i]);
        }
      } else if (!AFI->isThumb1OnlyFunction()) {
        // note: Thumb1 functions spill to R12, not the stack.  Reserve a slot
        // closest to SP or frame pointer.
        const TargetRegisterClass *RC = ARM::GPRRegisterClass;
        RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
                                                           RC->getAlignment(),
                                                           false));
      }
    }
  }

  // A far jump may have forced an LR spill even though LR is otherwise
  // unused; record that so the far-jump lowering can rely on the slot.
  if (ForceLRSpill) {
    MF.getRegInfo().setPhysRegUsed(ARM::LR);
    AFI->setCSRegisterIsSpilled(ARM::LR);
    AFI->setLRIsSpilledForFarJump(true);
  }
}

/// getRARegister - LR holds the return address on ARM.
unsigned ARMBaseRegisterInfo::getRARegister() const {
  return ARM::LR;
}

/// getFrameRegister - Return the register used as the base for frame
/// references: the frame pointer on Darwin or whenever a frame pointer is
/// required, SP otherwise.
unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  if (STI.isTargetDarwin() || hasFP(MF))
    return FramePtr;
  return ARM::SP;
}

/// getFrameIndexReference - Compute the offset of frame index FI and set
/// FrameReg to the base register (SP or FP) the offset is relative to.
int
ARMBaseRegisterInfo::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // Start with an SP-relative offset measured from the bottom of the frame.
  int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
  bool isFixed = MFI->isFixedObjectIndex(FI);

  FrameReg = ARM::SP;
  // Slots inside one of the callee-saved spill areas are addressed relative
  // to that area's base.
  if (AFI->isGPRCalleeSavedArea1Frame(FI))
    Offset -= AFI->getGPRCalleeSavedArea1Offset();
  else if (AFI->isGPRCalleeSavedArea2Frame(FI))
    Offset -= AFI->getGPRCalleeSavedArea2Offset();
  else if (AFI->isDPRCalleeSavedAreaFrame(FI))
    Offset -= AFI->getDPRCalleeSavedAreaOffset();
  else if (needsStackRealignment(MF)) {
    // When dynamically realigning the stack, use the frame pointer for
    // parameters, and the stack pointer for locals.
    assert (hasFP(MF) && "dynamic stack realignment without a FP!");
    if (isFixed) {
      FrameReg = getFrameRegister(MF);
      Offset -= AFI->getFramePtrSpillOffset();
    }
  } else if (hasFP(MF) && AFI->hasStackFrame()) {
    if (isFixed || MFI->hasVarSizedObjects()) {
      // Use frame pointer to reference fixed objects unless this is a
      // frameless function.
      FrameReg = getFrameRegister(MF);
      Offset -= AFI->getFramePtrSpillOffset();
    } else if (AFI->isThumb2Function()) {
      // In Thumb2 mode, the negative offset is very limited.
      int FPOffset = Offset - AFI->getFramePtrSpillOffset();
      if (FPOffset >= -255 && FPOffset < 0) {
        FrameReg = getFrameRegister(MF);
        Offset = FPOffset;
      }
    }
  }
  return Offset;
}

/// getFrameIndexOffset - Convenience wrapper that discards the base
/// register chosen by getFrameIndexReference.
int
ARMBaseRegisterInfo::getFrameIndexOffset(const MachineFunction &MF,
                                         int FI) const {
  unsigned FrameReg;
  return getFrameIndexReference(MF, FI, FrameReg);
}

// Exception-handling registers are not implemented for this target yet.
unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

/// getDwarfRegNum - Map a target register to its DWARF number.  isEH is
/// deliberately ignored: the same numbering is used for debug and EH.
int ARMBaseRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
  return ARMGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
}

/// getRegisterPairEven - Given the odd member of a register pair, return the
/// even member, or 0 when the pairing is unavailable (special or reserved
/// registers).
unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
                                                  const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R1:  return ARM::R0;
  case ARM::R3:  return ARM::R2;
  case ARM::R5:  return ARM::R4;
  // R6/R7, R8/R9 and R10/R11 pairs are only usable when the higher-numbered
  // half is not reserved (e.g. as frame pointer or platform register).
  case ARM::R7:  return isReservedReg(MF, ARM::R7)  ? 0 : ARM::R6;
  case ARM::R9:  return isReservedReg(MF, ARM::R9)  ? 0 : ARM::R8;
  case ARM::R11: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;

  // VFP single-precision pairs: S(2k+1) -> S(2k).
  case ARM::S1:  return ARM::S0;
  case ARM::S3:  return ARM::S2;
  case ARM::S5:  return ARM::S4;
  case ARM::S7:  return ARM::S6;
  case ARM::S9:  return ARM::S8;
  case ARM::S11: return ARM::S10;
  case ARM::S13: return ARM::S12;
  case ARM::S15: return ARM::S14;
  case ARM::S17: return ARM::S16;
  case ARM::S19: return ARM::S18;
  case ARM::S21: return ARM::S20;
  case ARM::S23: return ARM::S22;
  case ARM::S25: return ARM::S24;
  case ARM::S27: return ARM::S26;
  case ARM::S29: return ARM::S28;
  case ARM::S31: return ARM::S30;

  // VFP double-precision pairs: D(2k+1) -> D(2k).
  case ARM::D1:  return ARM::D0;
  case ARM::D3:  return ARM::D2;
  case ARM::D5:  return ARM::D4;
  case ARM::D7:  return ARM::D6;
  case ARM::D9:  return ARM::D8;
  case ARM::D11: return ARM::D10;
  case ARM::D13: return ARM::D12;
  case ARM::D15: return ARM::D14;
  case ARM::D17: return ARM::D16;
  case ARM::D19: return ARM::D18;
  case ARM::D21: return ARM::D20;
  case ARM::D23: return ARM::D22;
  case ARM::D25: return ARM::D24;
  case ARM::D27: return ARM::D26;
  case ARM::D29: return ARM::D28;
  case ARM::D31: return ARM::D30;
  }

  return 0;
}

/// getRegisterPairOdd - Given the even member of a register pair, return the
/// odd member, or 0 when the pairing is unavailable.
unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
                                                 const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R0:  return ARM::R1;
  case ARM::R2:  return ARM::R3;
  case ARM::R4:  return ARM::R5;
  // Mirror of getRegisterPairEven: reject the pair when its odd half is
  // reserved on this subtarget.
  case ARM::R6:  return isReservedReg(MF, ARM::R7)  ? 0 : ARM::R7;
  case ARM::R8:  return isReservedReg(MF, ARM::R9)  ? 0 : ARM::R9;
  case ARM::R10: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;

  // VFP single-precision pairs: S(2k) -> S(2k+1).
  case ARM::S0:  return ARM::S1;
  case ARM::S2:  return ARM::S3;
  case ARM::S4:  return ARM::S5;
  case ARM::S6:  return ARM::S7;
  case ARM::S8:  return ARM::S9;
  case ARM::S10: return ARM::S11;
  case ARM::S12: return ARM::S13;
  case ARM::S14: return ARM::S15;
  case ARM::S16: return ARM::S17;
  case ARM::S18: return ARM::S19;
  case ARM::S20: return ARM::S21;
  case ARM::S22: return ARM::S23;
  case ARM::S24: return ARM::S25;
  case ARM::S26: return ARM::S27;
  case ARM::S28: return ARM::S29;
  case ARM::S30: return ARM::S31;

  // VFP double-precision pairs: D(2k) -> D(2k+1).
  case ARM::D0:  return ARM::D1;
  case ARM::D2:  return ARM::D3;
  case ARM::D4:  return ARM::D5;
  case ARM::D6:  return ARM::D7;
  case ARM::D8:  return ARM::D9;
  case ARM::D10: return ARM::D11;
  case ARM::D12: return ARM::D13;
  case ARM::D14: return ARM::D15;
  case ARM::D16: return ARM::D17;
  case ARM::D18: return ARM::D19;
  case ARM::D20: return ARM::D21;
  case ARM::D22: return ARM::D23;
  case ARM::D24: return ARM::D25;
  case ARM::D26: return ARM::D27;
  case ARM::D28: return ARM::D29;
  case ARM::D30: return ARM::D31;
  }

  return 0;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::
emitLoadConstPool(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator &MBBI,
                  DebugLoc dl,
                  unsigned DestReg, unsigned SubIdx, int Val,
                  ARMCC::CondCodes Pred,
                  unsigned PredReg) const {
  MachineFunction &MF = *MBB.getParent();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  // Val becomes an i32 constant-pool entry with 4-byte alignment.
  const Constant *C =
        ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  // LDRcp: load DestReg(:SubIdx) from the constant-pool entry, predicated
  // by Pred/PredReg.
  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
    .addReg(DestReg, getDefRegState(true), SubIdx)
    .addConstantPoolIndex(Idx)
    .addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
}

// This target always runs the register scavenger.
bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

// Frame-index elimination may need scavenged registers as well.
bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
// not required, we reserve argument space for call sites in the function
// immediately on entry to the current function. This eliminates the need for
// add/sub sp brackets around call sites. Returns true if the call frame is
// included as part of the stack frame.
bool ARMBaseRegisterInfo::
hasReservedCallFrame(MachineFunction &MF) const {
  const MachineFrameInfo *FFI = MF.getFrameInfo();
  unsigned CFSize = FFI->getMaxCallFrameSize();
  // It's not always a good idea to include the call frame as part of the
  // stack frame. ARM (especially Thumb) has small immediate offset to
  // address the stack frame. So a large call frame can cause poor codegen
  // and may even makes it impossible to scavenge a register.
  if (CFSize >= ((1 << 12) - 1) / 2)  // Half of imm12
    return false;

  // Variable-sized objects force dynamic SP adjustment anyway.
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

// canSimplifyCallFramePseudos - If there is a reserved call frame, the
// call frame pseudos can be simplified. Unlike most targets, having a FP
// is not sufficient here since we still may reference some objects via SP
// even when FP is available in Thumb2 mode.
bool ARMBaseRegisterInfo::
canSimplifyCallFramePseudos(MachineFunction &MF) const {
  return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
}

/// emitSPUpdate - Add NumBytes (which may be negative) to SP, using the
/// ARM or Thumb2 add/sub-immediate expansion as appropriate.
static void
emitSPUpdate(bool isARM,
             MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
             DebugLoc dl, const ARMBaseInstrInfo &TII,
             int NumBytes,
             ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
  if (isARM)
    emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                            Pred, PredReg, TII);
  else
    emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                           Pred, PredReg, TII);
}

/// eliminateCallFramePseudoInstr - Lower ADJCALLSTACKDOWN/UP pseudos into
/// real SP adjustments (or simply delete them when the call frame is
/// reserved as part of the fixed stack frame).
void ARMBaseRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If we have alloca, convert as follows:
    // ADJCALLSTACKDOWN -> sub, sp, sp, amount
    // ADJCALLSTACKUP   -> add, sp, sp, amount
    MachineInstr *Old = I;
    DebugLoc dl = Old->getDebugLoc();
    unsigned Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly.  To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;

      ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
      assert(!AFI->isThumb1OnlyFunction() &&
             "This eliminateCallFramePseudoInstr does not support Thumb1!");
      bool isARM = !AFI->isThumbFunction();

      // Replace the pseudo instruction with a new instruction...
      unsigned Opc = Old->getOpcode();
      int PIdx = Old->findFirstPredOperandIdx();
      // Preserve any predication carried on the pseudo instruction.
      ARMCC::CondCodes Pred = (PIdx == -1)
        ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
      if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
        // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
        unsigned PredReg = Old->getOperand(2).getReg();
        emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
      } else {
        // Note: PredReg is operand 3 for ADJCALLSTACKUP.
        unsigned PredReg = Old->getOperand(3).getReg();
        assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
        emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
      }
    }
  }
  // The pseudo is always removed, even when no SP update was needed.
  MBB.erase(I);
}

/// eliminateFrameIndex - Replace the abstract frame-index operand of the
/// instruction at II with a concrete base register + offset.  Returns a
/// virtual scratch register holding base+offset when the offset could not
/// be folded into the instruction (0 otherwise), so callers may reuse it.
unsigned
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, FrameIndexValue *Value,
                                         RegScavenger *RS) const {
  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");

  // Locate the frame-index operand within MI.
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned FrameReg;

  int Offset = getFrameIndexReference(MF, FrameIndex, FrameReg);
  // SPAdj compensates for in-flight SP adjustments; it only applies when
  // the reference is actually SP-relative.
  if (FrameReg != ARM::SP)
    SPAdj = 0;
  Offset += SPAdj;

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(i).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(i+1).ChangeToImmediate(Offset);
    return 0;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
  }
  if (Done)
    return 0;

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    // Zero offset: the base register can be used directly.
    MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
  else {
    // Materialize base+offset into a fresh virtual register and use that
    // as the (killed) base operand instead.
    ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
    if (Value) {
      Value->first = FrameReg; // use the frame register as a kind indicator
      Value->second = Offset;
    }
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
    // Only report the scratch register for reuse when the option allows it.
    if (!ReuseFrameIndexVals)
      ScratchReg = 0;
  }
  return ScratchReg;
}

/// Move iterator past the next bunch of callee save load / store ops for
/// the particular spill area (1: integer area 1, 2: integer area 2,
/// 3: fp area, 0: don't care).
static void movePastCSLoadStoreOps(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator &MBBI,
                                   int Opc1, int Opc2, unsigned Area,
                                   const ARMSubtarget &STI) {
  // Walk forward while the instruction is a frame-index load/store of one
  // of the two expected opcodes.
  while (MBBI != MBB.end() &&
         ((MBBI->getOpcode() == Opc1) || (MBBI->getOpcode() == Opc2)) &&
         MBBI->getOperand(1).isFI()) {
    if (Area != 0) {
      bool Done = false;
      unsigned Category = 0;
      // Classify by the spilled/restored register: R4-R7/LR are area 1;
      // R8-R11 are area 2 on Darwin (area 1 elsewhere); D8-D15 are area 3.
      switch (MBBI->getOperand(0).getReg()) {
      case ARM::R4:  case ARM::R5:  case ARM::R6:  case ARM::R7:
      case ARM::LR:
        Category = 1;
        break;
      case ARM::R8:  case ARM::R9:  case ARM::R10: case ARM::R11:
        Category = STI.isTargetDarwin() ? 2 : 1;
        break;
      case ARM::D8:  case ARM::D9:  case ARM::D10: case ARM::D11:
      case ARM::D12: case ARM::D13: case ARM::D14: case ARM::D15:
        Category = 3;
        break;
      default:
        Done = true;
        break;
      }
      // Stop at the first instruction that is not part of the wanted area.
      if (Done || Category != Area)
        break;
    }

    ++MBBI;
  }
}

/// emitPrologue - Emit the function prologue: vararg save area, callee-saved
/// spill area SP adjustments, FP setup, local-area allocation and (if
/// needed) dynamic stack realignment.
void ARMBaseRegisterInfo::
emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo  *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This emitPrologue does not support Thumb1!");
  bool isARM = !AFI->isThumbFunction();
  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  unsigned NumBytes = MFI->getStackSize();
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Determine the sizes of each callee-save spill areas and record which frame
  // belongs to which callee-save spill areas.
  unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
  int FramePtrSpillFI = 0;

  // Allocate the vararg register save area. This is not counted in NumBytes.
  if (VARegSaveSize)
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, -VARegSaveSize);

  // No spill areas at all: just allocate the locals and we are done.
  if (!AFI->hasStackFrame()) {
    if (NumBytes != 0)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
    return;
  }

  // Classify each callee-saved spill into one of the three areas and total
  // the sizes (GPRs take 4 bytes, D-registers 8).
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    int FI = CSI[i].getFrameIdx();
    switch (Reg) {
    case ARM::R4:
    case ARM::R5:
    case ARM::R6:
    case ARM::R7:
    case ARM::LR:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      AFI->addGPRCalleeSavedArea1Frame(FI);
      GPRCS1Size += 4;
      break;
    case ARM::R8:
    case ARM::R9:
    case ARM::R10:
    case ARM::R11:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      // R8-R11 go into area 2 on Darwin, area 1 elsewhere.
      if (STI.isTargetDarwin()) {
        AFI->addGPRCalleeSavedArea2Frame(FI);
        GPRCS2Size += 4;
      } else {
        AFI->addGPRCalleeSavedArea1Frame(FI);
        GPRCS1Size += 4;
      }
      break;
    default:
      AFI->addDPRCalleeSavedAreaFrame(FI);
      DPRCSSize += 8;
    }
  }

  // Build the new SUBri to adjust SP for integer callee-save spill area 1.
  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS1Size);
  movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 1, STI);

  // Set FP to point to the stack slot that contains the previous FP.
  // For Darwin, FP is R7, which has now been stored in spill area 1.
  // Otherwise, if this is not Darwin, all the callee-saved registers go
  // into spill area 1, including the FP in R11. In either case, it is
  // now safe to emit this assignment.
  if (STI.isTargetDarwin() || hasFP(MF)) {
    unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri : ARM::t2ADDri;
    MachineInstrBuilder MIB =
      BuildMI(MBB, MBBI, dl, TII.get(ADDriOpc), FramePtr)
      .addFrameIndex(FramePtrSpillFI).addImm(0);
    AddDefaultCC(AddDefaultPred(MIB));
  }

  // Build the new SUBri to adjust SP for integer callee-save spill area 2.
  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS2Size);

  // Build the new SUBri to adjust SP for FP callee-save spill area.
  movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 2, STI);
  emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRCSSize);

  // Determine starting offsets of spill areas.  Offsets are measured from
  // the incoming SP (area 1 is highest in memory, then area 2, then DPRs).
  unsigned DPRCSOffset  = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
  unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
  unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
  if (STI.isTargetDarwin() || hasFP(MF))
    AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) +
                                NumBytes);
  AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
  AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
  AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);

  // Skip the VFP spills, then allocate space for locals in one go.
  movePastCSLoadStoreOps(MBB, MBBI, ARM::VSTRD, 0, 3, STI);
  NumBytes = DPRCSOffset;
  if (NumBytes) {
    // Adjust SP after all the callee-save spills.
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
  }

  if (STI.isTargetELF() && hasFP(MF)) {
    MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
                             AFI->getFramePtrSpillOffset());
  }

  // Record the final area sizes for the epilogue and frame-index lowering.
  AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
  AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
  AFI->setDPRCalleeSavedAreaSize(DPRCSSize);

  // If we need dynamic stack realignment, do it here.
  if (needsStackRealignment(MF)) {
    unsigned MaxAlign = MFI->getMaxAlignment();
    assert (!AFI->isThumb1OnlyFunction());
    if (!AFI->isThumbFunction()) {
      // Emit bic sp, sp, MaxAlign
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
                                          TII.get(ARM::BICri), ARM::SP)
                                  .addReg(ARM::SP, RegState::Kill)
                                  .addImm(MaxAlign-1)));
    } else {
      // We cannot use sp as source/dest register here, thus we're emitting the
      // following sequence:
      // mov r4, sp
      // bic r4, r4, MaxAlign
      // mov sp, r4
      // FIXME: It will be better just to find spare register here.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2tgpr), ARM::R4)
        .addReg(ARM::SP, RegState::Kill);
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
                                          TII.get(ARM::t2BICri), ARM::R4)
                                  .addReg(ARM::R4, RegState::Kill)
                                  .addImm(MaxAlign-1)));
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
        .addReg(ARM::R4, RegState::Kill);
    }
  }
}

/// isCalleeSavedRegister - True if Reg appears in the null-terminated
/// callee-saved register list CSRegs.
static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
  for (unsigned i = 0; CSRegs[i]; ++i)
    if (Reg == CSRegs[i])
      return true;
  return false;
}

/// isCSRestore - True if MI is a frame-index load that restores one of the
/// callee-saved registers (used to locate the CS-restore sequence when
/// emitting the epilogue).
static bool isCSRestore(MachineInstr *MI,
                        const ARMBaseInstrInfo &TII,
                        const unsigned *CSRegs) {
  return ((MI->getOpcode() == (int)ARM::VLDRD ||
           MI->getOpcode() == (int)ARM::LDR ||
           MI->getOpcode() == (int)ARM::t2LDRi12) &&
          MI->getOperand(1).isFI() &&
          isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs));
}

/// emitEpilogue - Emit the function epilogue: unwind SP through the spill
/// areas (mirroring emitPrologue), lower tail-call pseudos and deallocate
/// the vararg save area.
void ARMBaseRegisterInfo::
emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  assert(MBBI->getDesc().isReturn() &&
         "Can only insert epilog into returning blocks");
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc dl = MBBI->getDebugLoc();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This emitEpilogue does not support Thumb1!");
  bool isARM = !AFI->isThumbFunction();

  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  int NumBytes = (int)MFI->getStackSize();

  if (!AFI->hasStackFrame()) {
    // No spill areas: just deallocate the locals.
    if (NumBytes != 0)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
  } else {
    // Unwind MBBI to point to first LDR / VLDRD.
    const unsigned *CSRegs = getCalleeSavedRegs();
    if (MBBI != MBB.begin()) {
      do
        --MBBI;
      while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
      if (!isCSRestore(MBBI, TII, CSRegs))
        ++MBBI;
    }

    // Move SP to start of FP callee save spill area.
    NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
                 AFI->getGPRCalleeSavedArea2Size() +
                 AFI->getDPRCalleeSavedAreaSize());

    // Darwin ABI requires FP to point to the stack slot that contains the
    // previous FP.
    bool HasFP = hasFP(MF);
    if ((STI.isTargetDarwin() && NumBytes) || HasFP) {
      NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
      // Reset SP based on frame pointer only if the stack frame extends beyond
      // frame pointer stack slot or target is ELF and the function has FP.
      if (HasFP ||
          AFI->getGPRCalleeSavedArea2Size() ||
          AFI->getDPRCalleeSavedAreaSize() ||
          AFI->getDPRCalleeSavedAreaOffset()) {
        if (NumBytes) {
          // SP = FP - NumBytes.
          if (isARM)
            emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
                                    ARMCC::AL, 0, TII);
          else
            emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
                                   ARMCC::AL, 0, TII);
        } else {
          // Thumb2 or ARM.
          // Zero distance: a plain SP = FP copy suffices.
          if (isARM)
            BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
              .addReg(FramePtr)
              .addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
          else
            BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP)
              .addReg(FramePtr);
        }
      }
    } else if (NumBytes)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);

    // Move SP to start of integer callee save spill area 2 (skip past the
    // VFP restores, then pop the DPR area).
    movePastCSLoadStoreOps(MBB, MBBI, ARM::VLDRD, 0, 3, STI);
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getDPRCalleeSavedAreaSize());

    // Move SP to start of integer callee save spill area 1.
    movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 2, STI);
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea2Size());

    // Move SP to SP upon entry to the function.
    movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 1, STI);
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea1Size());
  }

  if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNdiND ||
      RetOpcode == ARM::TCRETURNri || RetOpcode == ARM::TCRETURNriND) {
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);

    // Jump to label or value in register.
    if (RetOpcode == ARM::TCRETURNdi) {
      BuildMI(MBB, MBBI, dl,
              TII.get(STI.isThumb() ? ARM::TAILJMPdt : ARM::TAILJMPd)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == ARM::TCRETURNdiND) {
      BuildMI(MBB, MBBI, dl,
              TII.get(STI.isThumb() ? ARM::TAILJMPdNDt : ARM::TAILJMPdND)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == ARM::TCRETURNri) {
      BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else if (RetOpcode == ARM::TCRETURNriND) {
      BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPrND)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    // Copy the remaining operands (e.g. implicit uses) from the pseudo onto
    // the real tail-jump instruction just emitted.
    MachineInstr *NewMI = prior(MBBI);
    for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
      NewMI->addOperand(MBBI->getOperand(i));

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  }

  // Finally deallocate the vararg register save area, if any.
  if (VARegSaveSize)
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, VARegSaveSize);
}

#include "ARMGenRegisterInfo.inc"