ARMFrameLowering.cpp revision dc3beb90178fc316f63790812b22201884eaa017
//===-- ARMFrameLowering.cpp - ARM Frame Information ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "ARMFrameLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

static cl::opt<bool>
SpillAlignedNEONRegs("align-neon-spills", cl::Hidden, cl::init(true),
                     cl::desc("Align ARM NEON spills in prolog and epilog"));

static MachineBasicBlock::iterator
skipAlignedDPRCS2Spills(MachineBasicBlock::iterator MI,
                        unsigned NumAlignedDPRCS2Regs);

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool ARMFrameLowering::hasFP(const MachineFunction &MF) const {
  const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();

  // iOS requires FP not to be clobbered for backtracing purposes.
  if (STI.isTargetIOS())
    return true;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  // Always eliminate non-leaf frame pointers.
  return ((MF.getTarget().Options.DisableFramePointerElim(MF) &&
           MFI->hasCalls()) ||
          RegInfo->needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken());
}

/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
/// not required, we reserve argument space for call sites in the function
/// immediately on entry to the current function. This eliminates the need for
/// add/sub sp brackets around call sites. Returns true if the call frame is
/// included as part of the stack frame.
bool ARMFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *FFI = MF.getFrameInfo();
  unsigned CFSize = FFI->getMaxCallFrameSize();
  // It's not always a good idea to include the call frame as part of the
  // stack frame. ARM (especially Thumb) has a small immediate offset range
  // for addressing the stack frame, so a large call frame can cause poor
  // codegen and may even make it impossible to scavenge a register.
  if (CFSize >= ((1 << 12) - 1) / 2)  // Half of imm12
    return false;

  return !MF.getFrameInfo()->hasVarSizedObjects();
}

/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
/// call frame pseudos can be simplified. Unlike most targets, having a FP
/// is not sufficient here since we still may reference some objects via SP
/// even when FP is available in Thumb2 mode.
bool
ARMFrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
  return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
}

static bool isCalleeSavedRegister(unsigned Reg, const uint16_t *CSRegs) {
  for (unsigned i = 0; CSRegs[i]; ++i)
    if (Reg == CSRegs[i])
      return true;
  return false;
}

static bool isCSRestore(MachineInstr *MI,
                        const ARMBaseInstrInfo &TII,
                        const uint16_t *CSRegs) {
  // Integer spill area is handled with "pop".
  if (MI->getOpcode() == ARM::LDMIA_RET ||
      MI->getOpcode() == ARM::t2LDMIA_RET ||
      MI->getOpcode() == ARM::LDMIA_UPD ||
      MI->getOpcode() == ARM::t2LDMIA_UPD ||
      MI->getOpcode() == ARM::VLDMDIA_UPD) {
    // The first two operands are predicates. The last two are
    // imp-def and imp-use of SP. Check everything in between.
    for (int i = 5, e = MI->getNumOperands(); i != e; ++i)
      if (!isCalleeSavedRegister(MI->getOperand(i).getReg(), CSRegs))
        return false;
    return true;
  }
  if ((MI->getOpcode() == ARM::LDR_POST_IMM ||
       MI->getOpcode() == ARM::LDR_POST_REG ||
       MI->getOpcode() == ARM::t2LDR_POST) &&
      isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs) &&
      MI->getOperand(1).getReg() == ARM::SP)
    return true;

  return false;
}

static void
emitSPUpdate(bool isARM,
             MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
             DebugLoc dl, const ARMBaseInstrInfo &TII,
             int NumBytes, unsigned MIFlags = MachineInstr::NoFlags,
             ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
  if (isARM)
    emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                            Pred, PredReg, TII, MIFlags);
  else
    emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                           Pred, PredReg, TII, MIFlags);
}

void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMBaseRegisterInfo *RegInfo =
    static_cast<const ARMBaseRegisterInfo*>(MF.getTarget().getRegisterInfo());
  const ARMBaseInstrInfo &TII =
    *static_cast<const ARMBaseInstrInfo*>(MF.getTarget().getInstrInfo());
  assert(!AFI->isThumb1OnlyFunction() &&
         "This emitPrologue does not support Thumb1!");
  bool isARM = !AFI->isThumbFunction();
  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  unsigned NumBytes = MFI->getStackSize();
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);

  // Determine the sizes of each callee-save spill area and record which
  // frame index belongs to which area.
  unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
  int FramePtrSpillFI = 0;
  int D8SpillFI = 0;

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
    return;

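  // The frame built below looks roughly like this, from high to low
  // addresses (a descriptive sketch inferred from the code in this function;
  // the exact offsets are computed further down):
  //   incoming arguments
  //   vararg register save area (if any)
  //   GPR callee-save area 1: r4-r7, lr (plus r8-r11 on non-iOS targets)
  //   GPR callee-save area 2: r8-r11 (iOS only)
  //   DPR callee-save area: d8-d15, possibly split off into an aligned
  //     DPRCS2 area when aligned NEON spills are enabled
  //   local stack objects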
  // Allocate the vararg register save area. This is not counted in NumBytes.
  if (VARegSaveSize)
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, -VARegSaveSize,
                 MachineInstr::FrameSetup);

  if (!AFI->hasStackFrame()) {
    if (NumBytes != 0)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes,
                   MachineInstr::FrameSetup);
    return;
  }

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    int FI = CSI[i].getFrameIdx();
    switch (Reg) {
    case ARM::R4:
    case ARM::R5:
    case ARM::R6:
    case ARM::R7:
    case ARM::LR:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      AFI->addGPRCalleeSavedArea1Frame(FI);
      GPRCS1Size += 4;
      break;
    case ARM::R8:
    case ARM::R9:
    case ARM::R10:
    case ARM::R11:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      if (STI.isTargetIOS()) {
        AFI->addGPRCalleeSavedArea2Frame(FI);
        GPRCS2Size += 4;
      } else {
        AFI->addGPRCalleeSavedArea1Frame(FI);
        GPRCS1Size += 4;
      }
      break;
    default:
      // This is a DPR. Exclude the aligned DPRCS2 spills.
      if (Reg == ARM::D8)
        D8SpillFI = FI;
      if (Reg < ARM::D8 || Reg >= ARM::D8 + AFI->getNumAlignedDPRCS2Regs()) {
        AFI->addDPRCalleeSavedAreaFrame(FI);
        DPRCSSize += 8;
      }
    }
  }

  // Move past area 1.
  if (GPRCS1Size > 0) MBBI++;

  // Set FP to point to the stack slot that contains the previous FP.
  // For iOS, FP is R7, which has now been stored in spill area 1.
  // Otherwise, if this is not iOS, all the callee-saved registers go
  // into spill area 1, including the FP in R11. In either case, it is
  // now safe to emit this assignment.
  bool HasFP = hasFP(MF);
  if (HasFP) {
    unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri : ARM::t2ADDri;
    MachineInstrBuilder MIB =
      BuildMI(MBB, MBBI, dl, TII.get(ADDriOpc), FramePtr)
      .addFrameIndex(FramePtrSpillFI).addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);
    AddDefaultCC(AddDefaultPred(MIB));
  }

  // Move past area 2.
  if (GPRCS2Size > 0) MBBI++;

  // Determine starting offsets of spill areas.
  unsigned DPRCSOffset  = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
  unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
  unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
  if (HasFP)
    AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) +
                                NumBytes);
  AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
  AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
  AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);

  // Move past area 3.
  if (DPRCSSize > 0) {
    MBBI++;
    // Since a vpush register list cannot have gaps, there may be multiple
    // vpush instructions in the prologue.
    while (MBBI->getOpcode() == ARM::VSTMDDB_UPD)
      MBBI++;
  }

  // Move past the aligned DPRCS2 area.
  if (AFI->getNumAlignedDPRCS2Regs() > 0) {
    MBBI = skipAlignedDPRCS2Spills(MBBI, AFI->getNumAlignedDPRCS2Regs());
    // The code inserted by emitAlignedDPRCS2Spills realigns the stack, and
    // leaves the stack pointer pointing to the DPRCS2 area.
    //
    // Adjust NumBytes to represent the stack slots below the DPRCS2 area.
    NumBytes += MFI->getObjectOffset(D8SpillFI);
  } else
    NumBytes = DPRCSOffset;

  if (NumBytes) {
    // Adjust SP after all the callee-save spills.
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes,
                 MachineInstr::FrameSetup);
    if (HasFP && isARM)
      // Restore from fp only in ARM mode: e.g. sub sp, r7, #24
      // Note it's not safe to do this in Thumb2 mode because it would have
      // taken two instructions:
      //   mov sp, r7
      //   sub sp, #24
      // If an interrupt is taken between the two instructions, then sp is in
      // an inconsistent state (pointing to the middle of the callee-saved
      // area). The interrupt handler can end up clobbering the registers.
      AFI->setShouldRestoreSPFromFP(true);
  }

  if (STI.isTargetELF() && hasFP(MF))
    MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
                             AFI->getFramePtrSpillOffset());

  AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
  AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
  AFI->setDPRCalleeSavedAreaSize(DPRCSSize);

  // If we need dynamic stack realignment, do it here. Be paranoid and make
  // sure if we also have VLAs, we have a base pointer for frame access.
  // If aligned NEON registers were spilled, the stack has already been
  // realigned.
  if (!AFI->getNumAlignedDPRCS2Regs() && RegInfo->needsStackRealignment(MF)) {
    unsigned MaxAlign = MFI->getMaxAlignment();
    assert(!AFI->isThumb1OnlyFunction());
    if (!AFI->isThumbFunction()) {
      // Emit bic sp, sp, MaxAlign
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
                                          TII.get(ARM::BICri), ARM::SP)
                                  .addReg(ARM::SP, RegState::Kill)
                                  .addImm(MaxAlign-1)));
    } else {
      // We cannot use sp as the source/dest register here, thus we're emitting
      // the following sequence:
      //   mov r4, sp
      //   bic r4, r4, MaxAlign
      //   mov sp, r4
      // FIXME: It will be better just to find a spare register here.
      AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::R4)
                     .addReg(ARM::SP, RegState::Kill));
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
                                          TII.get(ARM::t2BICri), ARM::R4)
                                  .addReg(ARM::R4, RegState::Kill)
                                  .addImm(MaxAlign-1)));
      AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP)
                     .addReg(ARM::R4, RegState::Kill));
    }

    AFI->setShouldRestoreSPFromFP(true);
  }

  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  // FIXME: Clarify FrameSetup flags here.
  if (RegInfo->hasBasePointer(MF)) {
    if (isARM)
      BuildMI(MBB, MBBI, dl,
              TII.get(ARM::MOVr), RegInfo->getBaseRegister())
        .addReg(ARM::SP)
        .addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
    else
      AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr),
                             RegInfo->getBaseRegister())
                     .addReg(ARM::SP));
  }

  // If the frame has variable sized objects then the epilogue must restore
  // the sp from fp. We can assume there's an FP here since hasFP already
  // checks for hasVarSizedObjects.
  if (MFI->hasVarSizedObjects())
    AFI->setShouldRestoreSPFromFP(true);
}

void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  assert(MBBI->isReturn() && "Can only insert epilog into returning blocks");
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc dl = MBBI->getDebugLoc();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
  const ARMBaseInstrInfo &TII =
    *static_cast<const ARMBaseInstrInfo*>(MF.getTarget().getInstrInfo());
  assert(!AFI->isThumb1OnlyFunction() &&
         "This emitEpilogue does not support Thumb1!");
  bool isARM = !AFI->isThumbFunction();

  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  int NumBytes = (int)MFI->getStackSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
    return;

  if (!AFI->hasStackFrame()) {
    if (NumBytes != 0)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
  } else {
    // Unwind MBBI to point to the first LDR / VLDRD.
    const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs();
    if (MBBI != MBB.begin()) {
      do
        --MBBI;
      while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
      if (!isCSRestore(MBBI, TII, CSRegs))
        ++MBBI;
    }

    // Move SP to start of FP callee save spill area.
    NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
                 AFI->getGPRCalleeSavedArea2Size() +
                 AFI->getDPRCalleeSavedAreaSize());

    // Reset SP based on frame pointer only if the stack frame extends beyond
    // the frame pointer stack slot, or the target is ELF and the function has
    // an FP.
    if (AFI->shouldRestoreSPFromFP()) {
      NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
      if (NumBytes) {
        if (isARM)
          emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
                                  ARMCC::AL, 0, TII);
        else {
          // It's not possible to restore SP from FP in a single instruction.
          // For iOS, this looks like:
          //   mov sp, r7
          //   sub sp, #24
          // This is bad: if an interrupt is taken after the mov, sp is in an
          // inconsistent state.
          // Use the first callee-saved register as a scratch register.
          assert(MF.getRegInfo().isPhysRegUsed(ARM::R4) &&
                 "No scratch register to restore SP from FP!");
          emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::R4, FramePtr, -NumBytes,
                                 ARMCC::AL, 0, TII);
          AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr),
                                 ARM::SP)
                         .addReg(ARM::R4));
        }
      } else {
        // Thumb2 or ARM.
        if (isARM)
          BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
            .addReg(FramePtr).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
        else
          AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr),
                                 ARM::SP)
                         .addReg(FramePtr));
      }
    } else if (NumBytes)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);

    // Increment past our save areas.
    if (AFI->getDPRCalleeSavedAreaSize()) {
      MBBI++;
      // Since a vpop register list cannot have gaps, there may be multiple
      // vpop instructions in the epilogue.
      while (MBBI->getOpcode() == ARM::VLDMDIA_UPD)
        MBBI++;
    }
    if (AFI->getGPRCalleeSavedArea2Size()) MBBI++;
    if (AFI->getGPRCalleeSavedArea1Size()) MBBI++;
  }

  if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNri) {
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);

    // Jump to label or value in register.
    if (RetOpcode == ARM::TCRETURNdi) {
      unsigned TCOpcode = STI.isThumb() ?
               (STI.isTargetIOS() ? ARM::tTAILJMPd : ARM::tTAILJMPdND) :
               ARM::TAILJMPd;
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(TCOpcode));
      if (JumpTarget.isGlobal())
        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                             JumpTarget.getTargetFlags());
      else {
        assert(JumpTarget.isSymbol());
        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
                              JumpTarget.getTargetFlags());
      }

      // Add the default predicate in Thumb mode.
      if (STI.isThumb()) MIB.addImm(ARMCC::AL).addReg(0);
    } else if (RetOpcode == ARM::TCRETURNri) {
      BuildMI(MBB, MBBI, dl,
              TII.get(STI.isThumb() ? ARM::tTAILJMPr : ARM::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = prior(MBBI);
    for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
      NewMI->addOperand(MBBI->getOperand(i));

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
    MBBI = NewMI;
  }

  if (VARegSaveSize)
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, VARegSaveSize);
}

/// getFrameIndexReference - Provide a base+offset reference to an FI slot for
/// debug info. It's the same as what we use for resolving the code-gen
/// references for now. FIXME: This can go wrong when references are
/// SP-relative and simple call frames aren't used.
int
ARMFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                         unsigned &FrameReg) const {
  return ResolveFrameIndexReference(MF, FI, FrameReg, 0);
}

int
ARMFrameLowering::ResolveFrameIndexReference(const MachineFunction &MF,
                                             int FI, unsigned &FrameReg,
                                             int SPAdj) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMBaseRegisterInfo *RegInfo =
    static_cast<const ARMBaseRegisterInfo*>(MF.getTarget().getRegisterInfo());
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
  int FPOffset = Offset - AFI->getFramePtrSpillOffset();
  bool isFixed = MFI->isFixedObjectIndex(FI);

  FrameReg = ARM::SP;
  Offset += SPAdj;
  if (AFI->isGPRCalleeSavedArea1Frame(FI))
    return Offset - AFI->getGPRCalleeSavedArea1Offset();
  else if (AFI->isGPRCalleeSavedArea2Frame(FI))
    return Offset - AFI->getGPRCalleeSavedArea2Offset();
  else if (AFI->isDPRCalleeSavedAreaFrame(FI))
    return Offset - AFI->getDPRCalleeSavedAreaOffset();

  // SP can move around if there are allocas. We may also lose track of SP
  // when emergency spilling inside a non-reserved call frame setup.
  bool hasMovingSP = !hasReservedCallFrame(MF);

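  // Base register selection below, in roughly decreasing priority (a summary
  // of the logic that follows, not an exhaustive statement of it): the frame
  // pointer for fixed (incoming argument) objects and whenever SP is not a
  // reliable base, the base register when one exists and SP can move, and SP
  // otherwise, with Thumb2-specific offset-range heuristics in between.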
  // When dynamically realigning the stack, use the frame pointer for
  // parameters, and the stack/base pointer for locals.
  if (RegInfo->needsStackRealignment(MF)) {
    assert(hasFP(MF) && "dynamic stack realignment without a FP!");
    if (isFixed) {
      FrameReg = RegInfo->getFrameRegister(MF);
      Offset = FPOffset;
    } else if (hasMovingSP) {
      assert(RegInfo->hasBasePointer(MF) &&
             "VLAs and dynamic stack alignment, but missing base pointer!");
      FrameReg = RegInfo->getBaseRegister();
    }
    return Offset;
  }

  // If there is a frame pointer, use it when we can.
  if (hasFP(MF) && AFI->hasStackFrame()) {
    // Use frame pointer to reference fixed objects. Use it for locals if
    // there are VLAs (and thus the SP isn't reliable as a base).
    if (isFixed || (hasMovingSP && !RegInfo->hasBasePointer(MF))) {
      FrameReg = RegInfo->getFrameRegister(MF);
      return FPOffset;
    } else if (hasMovingSP) {
      assert(RegInfo->hasBasePointer(MF) && "missing base pointer!");
      if (AFI->isThumb2Function()) {
        // Try to use the frame pointer if we can, else use the base pointer
        // since it's available. This is handy for the emergency spill slot, in
        // particular.
        if (FPOffset >= -255 && FPOffset < 0) {
          FrameReg = RegInfo->getFrameRegister(MF);
          return FPOffset;
        }
      }
    } else if (AFI->isThumb2Function()) {
      // Use  add <rd>, sp, #<imm8>
      //      ldr <rd>, [sp, #<imm8>]
      // if at all possible to save space.
      if (Offset >= 0 && (Offset & 3) == 0 && Offset <= 1020)
        return Offset;
      // In Thumb2 mode, the negative offset is very limited. Try to avoid
      // out of range references. ldr <rt>,[<rn>, #-<imm8>]
      if (FPOffset >= -255 && FPOffset < 0) {
        FrameReg = RegInfo->getFrameRegister(MF);
        return FPOffset;
      }
    } else if (Offset > (FPOffset < 0 ? -FPOffset : FPOffset)) {
      // Otherwise, use SP or FP, whichever is closer to the stack slot.
      FrameReg = RegInfo->getFrameRegister(MF);
      return FPOffset;
    }
  }
  // Use the base pointer if we have one.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  return Offset;
}

int ARMFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                          int FI) const {
  unsigned FrameReg;
  return getFrameIndexReference(MF, FI, FrameReg);
}

void ARMFrameLowering::emitPushInst(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    const std::vector<CalleeSavedInfo> &CSI,
                                    unsigned StmOpc, unsigned StrOpc,
                                    bool NoGap,
                                    bool(*Func)(unsigned, bool),
                                    unsigned NumAlignedDPRCS2Regs,
                                    unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();

  DebugLoc DL;
  if (MI != MBB.end()) DL = MI->getDebugLoc();

  SmallVector<std::pair<unsigned,bool>, 4> Regs;
  unsigned i = CSI.size();
  while (i != 0) {
    unsigned LastReg = 0;
    for (; i != 0; --i) {
      unsigned Reg = CSI[i-1].getReg();
      if (!(Func)(Reg, STI.isTargetIOS())) continue;

      // D-registers in the aligned area DPRCS2 are NOT spilled here.
      if (Reg >= ARM::D8 && Reg < ARM::D8 + NumAlignedDPRCS2Regs)
        continue;

      // Add the callee-saved register as live-in unless it's LR and
      // @llvm.returnaddress is called. If LR is returned for
      // @llvm.returnaddress then it's already added to the function and
      // entry block live-in sets.
      bool isKill = true;
      if (Reg == ARM::LR) {
        if (MF.getFrameInfo()->isReturnAddressTaken() &&
            MF.getRegInfo().isLiveIn(Reg))
          isKill = false;
      }

      if (isKill)
        MBB.addLiveIn(Reg);

      // If NoGap is true, push consecutive registers and then leave the rest
      // for other instructions. e.g.
      //   vpush {d8, d10, d11} -> vpush {d8}, vpush {d10, d11}
      if (NoGap && LastReg && LastReg != Reg-1)
        break;
      LastReg = Reg;
      Regs.push_back(std::make_pair(Reg, isKill));
    }

    if (Regs.empty())
      continue;
    if (Regs.size() > 1 || StrOpc == 0) {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(StmOpc), ARM::SP)
                       .addReg(ARM::SP).setMIFlags(MIFlags));
      for (unsigned i = 0, e = Regs.size(); i < e; ++i)
        MIB.addReg(Regs[i].first, getKillRegState(Regs[i].second));
    } else if (Regs.size() == 1) {
      MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc),
                                        ARM::SP)
        .addReg(Regs[0].first, getKillRegState(Regs[0].second))
        .addReg(ARM::SP).setMIFlags(MIFlags)
        .addImm(-4);
      AddDefaultPred(MIB);
    }
    Regs.clear();
  }
}

void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   const std::vector<CalleeSavedInfo> &CSI,
                                   unsigned LdmOpc, unsigned LdrOpc,
                                   bool isVarArg, bool NoGap,
                                   bool(*Func)(unsigned, bool),
                                   unsigned NumAlignedDPRCS2Regs) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  DebugLoc DL = MI->getDebugLoc();
  unsigned RetOpcode = MI->getOpcode();
  bool isTailCall = (RetOpcode == ARM::TCRETURNdi ||
                     RetOpcode == ARM::TCRETURNri);

  SmallVector<unsigned, 4> Regs;
  unsigned i = CSI.size();
  while (i != 0) {
    unsigned LastReg = 0;
    bool DeleteRet = false;
    for (; i != 0; --i) {
      unsigned Reg = CSI[i-1].getReg();
      if (!(Func)(Reg, STI.isTargetIOS())) continue;

      // The aligned reloads from area DPRCS2 are not inserted here.
      if (Reg >= ARM::D8 && Reg < ARM::D8 + NumAlignedDPRCS2Regs)
        continue;

      if (Reg == ARM::LR && !isTailCall && !isVarArg && STI.hasV5TOps()) {
        Reg = ARM::PC;
        LdmOpc = AFI->isThumbFunction() ? ARM::t2LDMIA_RET : ARM::LDMIA_RET;
        // Fold the return instruction into the LDM.
        DeleteRet = true;
      }

      // If NoGap is true, pop consecutive registers and then leave the rest
      // for other instructions. e.g.
      //   vpop {d8, d10, d11} -> vpop {d8}, vpop {d10, d11}
      if (NoGap && LastReg && LastReg != Reg-1)
        break;

      LastReg = Reg;
      Regs.push_back(Reg);
    }

    if (Regs.empty())
      continue;
    if (Regs.size() > 1 || LdrOpc == 0) {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(LdmOpc), ARM::SP)
                       .addReg(ARM::SP));
      for (unsigned i = 0, e = Regs.size(); i < e; ++i)
        MIB.addReg(Regs[i], getDefRegState(true));
      if (DeleteRet) {
        MIB.copyImplicitOps(&*MI);
        MI->eraseFromParent();
      }
      MI = MIB;
    } else if (Regs.size() == 1) {
      // If we adjusted the reg to PC from LR above, switch it back here. We
      // only do that for LDM.
      if (Regs[0] == ARM::PC)
        Regs[0] = ARM::LR;
      MachineInstrBuilder MIB =
        BuildMI(MBB, MI, DL, TII.get(LdrOpc), Regs[0])
          .addReg(ARM::SP, RegState::Define)
          .addReg(ARM::SP);
      // ARM mode needs an extra reg0 here due to addrmode2. Will go away once
      // that refactoring is complete (eventually).
      if (LdrOpc == ARM::LDR_POST_REG || LdrOpc == ARM::LDR_POST_IMM) {
        MIB.addReg(0);
        MIB.addImm(ARM_AM::getAM2Opc(ARM_AM::add, 4, ARM_AM::no_shift));
      } else
        MIB.addImm(4);
      AddDefaultPred(MIB);
    }
    Regs.clear();
  }
}

/// Emit aligned spill instructions for NumAlignedDPRCS2Regs D-registers
/// starting from d8. Also insert stack realignment code and leave the stack
/// pointer pointing to the d8 spill slot.
static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    unsigned NumAlignedDPRCS2Regs,
                                    const std::vector<CalleeSavedInfo> &CSI,
                                    const TargetRegisterInfo *TRI) {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  DebugLoc DL = MI->getDebugLoc();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  MachineFrameInfo &MFI = *MF.getFrameInfo();

  // Mark the D-register spill slots as properly aligned. Since MFI computes
  // stack slot layout backwards, this can actually mean that the d-reg stack
  // slot offsets can be wrong. The offset for d8 will always be correct.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned DNum = CSI[i].getReg() - ARM::D8;
    if (DNum >= 8)
      continue;
    int FI = CSI[i].getFrameIdx();
    // The even-numbered registers will be 16-byte aligned, the odd-numbered
    // registers will be 8-byte aligned.
    MFI.setObjectAlignment(FI, DNum % 2 ? 8 : 16);

    // The stack slot for D8 needs to be maximally aligned because this is
    // actually the point where we align the stack pointer. MachineFrameInfo
    // computes all offsets relative to the incoming stack pointer which is a
    // bit weird when realigning the stack. Any extra padding for this
    // over-alignment is not realized because the code inserted below adjusts
    // the stack pointer by numregs * 8 before aligning the stack pointer.
    if (DNum == 0)
      MFI.setObjectAlignment(FI, MFI.getMaxAlignment());
  }

  // Move the stack pointer to the d8 spill slot, and align it at the same
  // time. Leave the stack slot address in the scratch register r4.
  //
  //   sub r4, sp, #numregs * 8
  //   bic r4, r4, #align - 1
  //   mov sp, r4
  //
  bool isThumb = AFI->isThumbFunction();
  assert(!AFI->isThumb1OnlyFunction() && "Can't realign stack for thumb1");
  AFI->setShouldRestoreSPFromFP(true);

  // sub r4, sp, #numregs * 8
  // The immediate is <= 64, so it doesn't need any special encoding.
  unsigned Opc = isThumb ? ARM::t2SUBri : ARM::SUBri;
  AddDefaultCC(AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
                              .addReg(ARM::SP)
                              .addImm(8 * NumAlignedDPRCS2Regs)));

  // bic r4, r4, #align-1
  Opc = isThumb ? ARM::t2BICri : ARM::BICri;
  unsigned MaxAlign = MF.getFrameInfo()->getMaxAlignment();
  AddDefaultCC(AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
                              .addReg(ARM::R4, RegState::Kill)
                              .addImm(MaxAlign - 1)));

  // mov sp, r4
  // The stack pointer must be adjusted before spilling anything, otherwise
  // the stack slots could be clobbered by an interrupt handler.
  // Leave r4 live, it is used below.
  Opc = isThumb ? ARM::tMOVr : ARM::MOVr;
  MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(Opc), ARM::SP)
                              .addReg(ARM::R4);
  MIB = AddDefaultPred(MIB);
  if (!isThumb)
    AddDefaultCC(MIB);

  // Now spill NumAlignedDPRCS2Regs registers starting from d8.
  // r4 holds the stack slot address.
  unsigned NextReg = ARM::D8;

  // 16-byte aligned vst1.64 with 4 d-regs and address writeback.
  // The writeback is only needed when emitting two vst1.64 instructions.
  if (NumAlignedDPRCS2Regs >= 6) {
    unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
                                               &ARM::QQPRRegClass);
    MBB.addLiveIn(SupReg);
    AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Qwb_fixed),
                           ARM::R4)
                   .addReg(ARM::R4, RegState::Kill).addImm(16)
                   .addReg(NextReg)
                   .addReg(SupReg, RegState::ImplicitKill));
    NextReg += 4;
    NumAlignedDPRCS2Regs -= 4;
  }

  // We won't modify r4 beyond this point. It currently points to the next
  // register to be spilled.
  unsigned R4BaseReg = NextReg;

  // 16-byte aligned vst1.64 with 4 d-regs, no writeback.
  if (NumAlignedDPRCS2Regs >= 4) {
    unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
                                               &ARM::QQPRRegClass);
    MBB.addLiveIn(SupReg);
    AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Q))
                   .addReg(ARM::R4).addImm(16).addReg(NextReg)
                   .addReg(SupReg, RegState::ImplicitKill));
    NextReg += 4;
    NumAlignedDPRCS2Regs -= 4;
  }

  // 16-byte aligned vst1.64 with 2 d-regs.
  if (NumAlignedDPRCS2Regs >= 2) {
    unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
                                               &ARM::QPRRegClass);
    MBB.addLiveIn(SupReg);
    AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1q64))
                   .addReg(ARM::R4).addImm(16).addReg(SupReg));
    NextReg += 2;
    NumAlignedDPRCS2Regs -= 2;
  }

  // Finally, use a vanilla vstr.64 for the odd last register.
  if (NumAlignedDPRCS2Regs) {
    MBB.addLiveIn(NextReg);
    // vstr.64 uses addrmode5 which has an offset scale of 4.
    AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VSTRD))
                   .addReg(NextReg)
                   .addReg(ARM::R4).addImm((NextReg-R4BaseReg)*2));
  }

  // The last spill instruction inserted should kill the scratch register r4.
  llvm::prior(MI)->addRegisterKilled(ARM::R4, TRI);
}

/// Skip past the code inserted by emitAlignedDPRCS2Spills, and return an
/// iterator to the following instruction.
static MachineBasicBlock::iterator
skipAlignedDPRCS2Spills(MachineBasicBlock::iterator MI,
                        unsigned NumAlignedDPRCS2Regs) {
  //   sub r4, sp, #numregs * 8
  //   bic r4, r4, #align - 1
  //   mov sp, r4
  ++MI; ++MI; ++MI;
  assert(MI->mayStore() && "Expecting spill instruction");

  // The switch cases all fall through.
  switch (NumAlignedDPRCS2Regs) {
  case 7:
    ++MI;
    assert(MI->mayStore() && "Expecting spill instruction");
  default:
    ++MI;
    assert(MI->mayStore() && "Expecting spill instruction");
  case 1:
  case 2:
  case 4:
    assert(MI->killsRegister(ARM::R4) && "Missed kill flag");
    ++MI;
  }
  return MI;
}

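// For reference (derived from the two functions above, not additional
// behavior), the aligned spill sequence emitted after the three setup
// instructions is, per register count:
//   1 reg : vstr d8
//   2 regs: vst1.64 {d8,d9}
//   3 regs: vst1.64 {d8,d9}, vstr d10
//   4 regs: vst1.64 {d8-d11}
//   5 regs: vst1.64 {d8-d11}, vstr d12
//   6 regs: vst1.64 {d8-d11} (writeback), vst1.64 {d12,d13}
//   7 regs: vst1.64 {d8-d11} (writeback), vst1.64 {d12,d13}, vstr d14
//   8 regs: vst1.64 {d8-d11} (writeback), vst1.64 {d12-d15}
// skipAlignedDPRCS2Spills counts instructions according to this pattern and
// must be kept in sync with it.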
/// Emit aligned reload instructions for NumAlignedDPRCS2Regs D-registers
/// starting from d8. These instructions are assumed to execute while the
/// stack is still aligned, unlike the code inserted by emitPopInst.
static void emitAlignedDPRCS2Restores(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned NumAlignedDPRCS2Regs,
                                      const std::vector<CalleeSavedInfo> &CSI,
                                      const TargetRegisterInfo *TRI) {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  DebugLoc DL = MI->getDebugLoc();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();

  // Find the frame index assigned to d8.
  int D8SpillFI = 0;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i)
    if (CSI[i].getReg() == ARM::D8) {
      D8SpillFI = CSI[i].getFrameIdx();
      break;
    }

  // Materialize the address of the d8 spill slot into the scratch register r4.
  // This can be fairly complicated if the stack frame is large, so just use
  // the normal frame index elimination mechanism to do it. This code runs as
  // the initial part of the epilog where the stack and base pointers haven't
  // been changed yet.
  bool isThumb = AFI->isThumbFunction();
  assert(!AFI->isThumb1OnlyFunction() && "Can't realign stack for thumb1");

  unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
  AddDefaultCC(AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
                              .addFrameIndex(D8SpillFI).addImm(0)));

  // Now restore NumAlignedDPRCS2Regs registers starting from d8.
  unsigned NextReg = ARM::D8;

  // 16-byte aligned vld1.64 with 4 d-regs and writeback.
  if (NumAlignedDPRCS2Regs >= 6) {
    unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
                                               &ARM::QQPRRegClass);
    AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Qwb_fixed), NextReg)
                   .addReg(ARM::R4, RegState::Define)
                   .addReg(ARM::R4, RegState::Kill).addImm(16)
                   .addReg(SupReg, RegState::ImplicitDefine));
    NextReg += 4;
    NumAlignedDPRCS2Regs -= 4;
  }

  // We won't modify r4 beyond this point. It currently points to the next
  // register to be reloaded.
  unsigned R4BaseReg = NextReg;

  // 16-byte aligned vld1.64 with 4 d-regs, no writeback.
  if (NumAlignedDPRCS2Regs >= 4) {
    unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
                                               &ARM::QQPRRegClass);
    AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Q), NextReg)
                   .addReg(ARM::R4).addImm(16)
                   .addReg(SupReg, RegState::ImplicitDefine));
    NextReg += 4;
    NumAlignedDPRCS2Regs -= 4;
  }

  // 16-byte aligned vld1.64 with 2 d-regs.
  if (NumAlignedDPRCS2Regs >= 2) {
    unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
                                               &ARM::QPRRegClass);
    AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1q64), SupReg)
                   .addReg(ARM::R4).addImm(16));
    NextReg += 2;
    NumAlignedDPRCS2Regs -= 2;
  }

  // Finally, use a vanilla vldr.64 for the remaining odd register.
  if (NumAlignedDPRCS2Regs)
    AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLDRD), NextReg)
                   .addReg(ARM::R4).addImm(2*(NextReg-R4BaseReg)));

  // The last reload instruction inserted should kill the scratch register r4.
  llvm::prior(MI)->addRegisterKilled(ARM::R4, TRI);
}

bool ARMFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                        const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  unsigned PushOpc = AFI->isThumbFunction() ? ARM::t2STMDB_UPD : ARM::STMDB_UPD;
  unsigned PushOneOpc = AFI->isThumbFunction() ?
    ARM::t2STR_PRE : ARM::STR_PRE_IMM;
  unsigned FltOpc = ARM::VSTMDDB_UPD;
  unsigned NumAlignedDPRCS2Regs = AFI->getNumAlignedDPRCS2Regs();
  emitPushInst(MBB, MI, CSI, PushOpc, PushOneOpc, false, &isARMArea1Register, 0,
               MachineInstr::FrameSetup);
  emitPushInst(MBB, MI, CSI, PushOpc, PushOneOpc, false, &isARMArea2Register, 0,
               MachineInstr::FrameSetup);
  emitPushInst(MBB, MI, CSI, FltOpc, 0, true, &isARMArea3Register,
               NumAlignedDPRCS2Regs, MachineInstr::FrameSetup);

  // The code above does not insert spill code for the aligned DPRCS2 registers.
  // The stack realignment code will be inserted between the push instructions
  // and these spills.
  if (NumAlignedDPRCS2Regs)
    emitAlignedDPRCS2Spills(MBB, MI, NumAlignedDPRCS2Regs, CSI, TRI);

  return true;
}

bool ARMFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                        const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
  unsigned NumAlignedDPRCS2Regs = AFI->getNumAlignedDPRCS2Regs();

  // The emitPopInst calls below do not insert reloads for the aligned DPRCS2
  // registers. Do that here instead.
  if (NumAlignedDPRCS2Regs)
    emitAlignedDPRCS2Restores(MBB, MI, NumAlignedDPRCS2Regs, CSI, TRI);

  unsigned PopOpc = AFI->isThumbFunction() ? ARM::t2LDMIA_UPD : ARM::LDMIA_UPD;
  unsigned LdrOpc = AFI->isThumbFunction() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
  unsigned FltOpc = ARM::VLDMDIA_UPD;
  emitPopInst(MBB, MI, CSI, FltOpc, 0, isVarArg, true, &isARMArea3Register,
              NumAlignedDPRCS2Regs);
  emitPopInst(MBB, MI, CSI, PopOpc, LdrOpc, isVarArg, false,
              &isARMArea2Register, 0);
  emitPopInst(MBB, MI, CSI, PopOpc, LdrOpc, isVarArg, false,
              &isARMArea1Register, 0);

  return true;
}

// FIXME: Make generic?
static unsigned GetFunctionSizeInBytes(const MachineFunction &MF,
                                       const ARMBaseInstrInfo &TII) {
  unsigned FnSize = 0;
  for (MachineFunction::const_iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    const MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::const_iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I)
      FnSize += TII.GetInstSizeInBytes(I);
  }
  return FnSize;
}

/// estimateRSStackSizeLimit - Look at each instruction that references stack
/// frames and return the stack size limit beyond which some of these
/// instructions will require a scratch register during their expansion later.
// FIXME: Move to TII?
static unsigned estimateRSStackSizeLimit(MachineFunction &MF,
                                         const TargetFrameLowering *TFI) {
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned Limit = (1 << 12) - 1;
  for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (!I->getOperand(i).isFI()) continue;

        // When using ADDri to get the address of a stack object, 255 is the
        // largest offset guaranteed to fit in the immediate offset.
        if (I->getOpcode() == ARM::ADDri) {
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        }

        // Otherwise check the addressing mode.
        switch (I->getDesc().TSFlags & ARMII::AddrModeMask) {
        case ARMII::AddrMode3:
        case ARMII::AddrModeT2_i8:
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode5:
        case ARMII::AddrModeT2_i8s4:
          Limit = std::min(Limit, ((1U << 8) - 1) * 4);
          break;
        case ARMII::AddrModeT2_i12:
          // i12 supports only positive offsets, so these will be converted to
          // i8 opcodes. See llvm::rewriteT2FrameIndex.
          if (TFI->hasFP(MF) && AFI->hasStackFrame())
            Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode4:
        case ARMII::AddrMode6:
          // Addressing modes 4 & 6 (load/store) instructions can't encode an
          // immediate offset for stack references.
          return 0;
        default:
          break;
        }
        break; // At most one FI per instruction
      }
    }
  }

  return Limit;
}

// In functions that realign the stack, it can be an advantage to spill the
// callee-saved vector registers after realigning the stack. The vst1 and vld1
// instructions take alignment hints that can improve performance.
//
static void checkNumAlignedDPRCS2Regs(MachineFunction &MF) {
  MF.getInfo<ARMFunctionInfo>()->setNumAlignedDPRCS2Regs(0);
  if (!SpillAlignedNEONRegs)
    return;

  // Naked functions don't spill callee-saved registers.
  if (MF.getFunction()->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                                     Attribute::Naked))
    return;

  // We are planning to use NEON instructions vst1 / vld1.
  if (!MF.getTarget().getSubtarget<ARMSubtarget>().hasNEON())
    return;

  // Don't bother if the default stack alignment is sufficiently high.
  if (MF.getTarget().getFrameLowering()->getStackAlignment() >= 8)
    return;

  // Aligned spills require stack realignment.
  const ARMBaseRegisterInfo *RegInfo =
    static_cast<const ARMBaseRegisterInfo*>(MF.getTarget().getRegisterInfo());
  if (!RegInfo->canRealignStack(MF))
    return;

  // We always spill contiguous d-registers starting from d8. Count how many
  // need spilling. The register allocator will almost always use the
  // callee-saved registers in order, but it can happen that there are holes in
  // the range. Registers above the hole will be spilled to the standard DPRCS
  // area.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned NumSpills = 0;
  for (; NumSpills < 8; ++NumSpills)
    if (!MRI.isPhysRegUsed(ARM::D8 + NumSpills))
      break;

  // Don't do this for just one d-register. It's not worth it.
  if (NumSpills < 2)
    return;

  // Spill the first NumSpills D-registers after realigning the stack.
  MF.getInfo<ARMFunctionInfo>()->setNumAlignedDPRCS2Regs(NumSpills);

  // A scratch register is required for the vst1 / vld1 instructions.
  MF.getRegInfo().setPhysRegUsed(ARM::R4);
}

void
ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                       RegScavenger *RS) const {
  // This tells PEI to spill the FP as if it is any other callee-save register
  // to take advantage of the eliminateFrameIndex machinery. This also ensures
  // it is spilled in the order specified by getCalleeSavedRegs() to make it
  // easier to combine multiple loads / stores.
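  //
  // In outline (a summary of the code below, not additional behavior): decide
  // which callee-saved registers must be spilled, force extra spills where the
  // prologue/epilogue will need them (LR, an even number of GPRs, a scratch
  // register such as R4), and reserve a register-scavenger spill slot when
  // stack offsets may exceed the immediate ranges estimated above.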
  bool CanEliminateFrame = true;
  bool CS1Spilled = false;
  bool LRSpilled = false;
  unsigned NumGPRSpills = 0;
  SmallVector<unsigned, 4> UnspilledCS1GPRs;
  SmallVector<unsigned, 4> UnspilledCS2GPRs;
  const ARMBaseRegisterInfo *RegInfo =
    static_cast<const ARMBaseRegisterInfo*>(MF.getTarget().getRegisterInfo());
  const ARMBaseInstrInfo &TII =
    *static_cast<const ARMBaseInstrInfo*>(MF.getTarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);

  // Spill R4 if a Thumb2 function requires stack realignment - it will be used
  // as a scratch register. Also spill R4 if a Thumb2 function has variable
  // sized objects, since it's not always possible to restore sp from fp in a
  // single instruction.
  // FIXME: It will be better just to find a spare register here.
  if (AFI->isThumb2Function() &&
      (MFI->hasVarSizedObjects() || RegInfo->needsStackRealignment(MF)))
    MRI.setPhysRegUsed(ARM::R4);

  if (AFI->isThumb1OnlyFunction()) {
    // Spill LR if the Thumb1 function uses variable length argument lists.
    if (AFI->getVarArgsRegSaveSize() > 0)
      MRI.setPhysRegUsed(ARM::LR);

    // Spill R4 if the Thumb1 epilogue has to restore SP from FP. We don't know
    // for sure what the stack size will be, but for this, an estimate is good
    // enough. If anything changes it, it'll be a spill, which implies
    // we've used all the registers and so R4 is already used, so not marking
    // it here will be OK.
    // FIXME: It will be better just to find a spare register here.
    unsigned StackSize = MFI->estimateStackSize(MF);
    if (MFI->hasVarSizedObjects() || StackSize > 508)
      MRI.setPhysRegUsed(ARM::R4);
  }

  // See if we can spill vector registers to aligned stack.
  checkNumAlignedDPRCS2Regs(MF);

  // Spill the BasePtr if it's used.
  if (RegInfo->hasBasePointer(MF))
    MRI.setPhysRegUsed(RegInfo->getBaseRegister());

  // Don't spill FP if the frame can be eliminated. This is determined
  // by scanning the callee-save registers to see if any is used.
  const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs();
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    bool Spilled = false;
    if (MRI.isPhysRegUsed(Reg)) {
      Spilled = true;
      CanEliminateFrame = false;
    }

    if (!ARM::GPRRegClass.contains(Reg))
      continue;

    if (Spilled) {
      NumGPRSpills++;

      if (!STI.isTargetIOS()) {
        if (Reg == ARM::LR)
          LRSpilled = true;
        CS1Spilled = true;
        continue;
      }

      // Keep track of whether LR and any of R4, R5, R6, or R7 is spilled.
      switch (Reg) {
      case ARM::LR:
        LRSpilled = true;
        // Fallthrough
      case ARM::R4: case ARM::R5:
      case ARM::R6: case ARM::R7:
        CS1Spilled = true;
        break;
      default:
        break;
      }
    } else {
      if (!STI.isTargetIOS()) {
        UnspilledCS1GPRs.push_back(Reg);
        continue;
      }

      switch (Reg) {
      case ARM::R4: case ARM::R5:
      case ARM::R6: case ARM::R7:
      case ARM::LR:
        UnspilledCS1GPRs.push_back(Reg);
        break;
      default:
        UnspilledCS2GPRs.push_back(Reg);
        break;
      }
    }
  }

  bool ForceLRSpill = false;
  if (!LRSpilled && AFI->isThumb1OnlyFunction()) {
    unsigned FnSize = GetFunctionSizeInBytes(MF, TII);
    // Force LR to be spilled if the Thumb function size is > 2048. This
    // enables the use of BL to implement a far jump. If it turns out that
    // it's not needed then the branch fix-up path will undo it.
    if (FnSize >= (1 << 11)) {
      CanEliminateFrame = false;
      ForceLRSpill = true;
    }
  }

  // If any of the stack slot references may be out of range of an immediate
  // offset, make sure a register (or a spill slot) is available for the
  // register scavenger. Note that if we're indexing off the frame pointer, the
  // effective stack size is 4 bytes larger since the FP points to the stack
  // slot of the previous FP. Also, if we have variable sized objects in the
  // function, stack slot references will often be negative, and some of
  // our instructions are positive-offset only, so conservatively consider
  // that case to want a spill slot (or register) as well. Similarly, if
  // the function adjusts the stack pointer during execution and the
  // adjustments aren't already part of our stack size estimate, our offset
  // calculations may be off, so be conservative.
  // FIXME: We could add logic to be more precise about negative offsets
  // and which instructions will need a scratch register for them. Is it
  // worth the effort and added fragility?
  bool BigStack =
    (RS &&
     (MFI->estimateStackSize(MF) +
      ((hasFP(MF) && AFI->hasStackFrame()) ? 4 : 0) >=
      estimateRSStackSizeLimit(MF, this)))
    || MFI->hasVarSizedObjects()
    || (MFI->adjustsStack() && !canSimplifyCallFramePseudos(MF));

  bool ExtraCSSpill = false;
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF)) {
    AFI->setHasStackFrame(true);

    // If LR is not spilled, but at least one of R4, R5, R6, and R7 is spilled,
    // spill LR as well so we can fold BX_RET into the register restore (LDM).
    if (!LRSpilled && CS1Spilled) {
      MRI.setPhysRegUsed(ARM::LR);
      NumGPRSpills++;
      UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(),
                                       UnspilledCS1GPRs.end(),
                                       (unsigned)ARM::LR));
      ForceLRSpill = false;
      ExtraCSSpill = true;
    }

    if (hasFP(MF)) {
      MRI.setPhysRegUsed(FramePtr);
      NumGPRSpills++;
    }

    // If stack and double are 8-byte aligned and we are spilling an odd number
    // of GPRs, spill one extra callee save GPR so we won't have to pad between
    // the integer and double callee save areas.
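    // For example, with 8-byte stack alignment and five 4-byte GPR spills
    // (20 bytes), spilling one more GPR brings the area to 24 bytes and keeps
    // the following 8-byte D-register spills naturally aligned without a
    // 4-byte gap.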
    unsigned TargetAlign = getStackAlignment();
    if (TargetAlign == 8 && (NumGPRSpills & 1)) {
      if (CS1Spilled && !UnspilledCS1GPRs.empty()) {
        for (unsigned i = 0, e = UnspilledCS1GPRs.size(); i != e; ++i) {
          unsigned Reg = UnspilledCS1GPRs[i];
          // Don't spill a high register if the function is Thumb1.
          if (!AFI->isThumb1OnlyFunction() ||
              isARMLowRegister(Reg) || Reg == ARM::LR) {
            MRI.setPhysRegUsed(Reg);
            if (!MRI.isReserved(Reg))
              ExtraCSSpill = true;
            break;
          }
        }
      } else if (!UnspilledCS2GPRs.empty() && !AFI->isThumb1OnlyFunction()) {
        unsigned Reg = UnspilledCS2GPRs.front();
        MRI.setPhysRegUsed(Reg);
        if (!MRI.isReserved(Reg))
          ExtraCSSpill = true;
      }
    }

    // Estimate if we might need to scavenge a register at some point in order
    // to materialize a stack offset. If so, either spill one additional
    // callee-saved register or reserve a special spill slot to facilitate
    // register scavenging. Thumb1 needs a spill slot for stack pointer
    // adjustments also, even when the frame itself is small.
    if (BigStack && !ExtraCSSpill) {
      // If any non-reserved CS register isn't spilled, just spill one or two
      // extra. That should take care of it!
      unsigned NumExtras = TargetAlign / 4;
      SmallVector<unsigned, 2> Extras;
      while (NumExtras && !UnspilledCS1GPRs.empty()) {
        unsigned Reg = UnspilledCS1GPRs.back();
        UnspilledCS1GPRs.pop_back();
        if (!MRI.isReserved(Reg) &&
            (!AFI->isThumb1OnlyFunction() || isARMLowRegister(Reg) ||
             Reg == ARM::LR)) {
          Extras.push_back(Reg);
          NumExtras--;
        }
      }
      // For non-Thumb1 functions, also check for hi-reg CS registers.
      if (!AFI->isThumb1OnlyFunction()) {
        while (NumExtras && !UnspilledCS2GPRs.empty()) {
          unsigned Reg = UnspilledCS2GPRs.back();
          UnspilledCS2GPRs.pop_back();
          if (!MRI.isReserved(Reg)) {
            Extras.push_back(Reg);
            NumExtras--;
          }
        }
      }
      if (Extras.size() && NumExtras == 0) {
        for (unsigned i = 0, e = Extras.size(); i != e; ++i) {
          MRI.setPhysRegUsed(Extras[i]);
        }
      } else if (!AFI->isThumb1OnlyFunction()) {
        // Note: Thumb1 functions spill to R12, not the stack. Reserve a slot
        // closest to SP or the frame pointer.
        const TargetRegisterClass *RC = &ARM::GPRRegClass;
        RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
                                                           RC->getAlignment(),
                                                           false));
      }
    }
  }

  if (ForceLRSpill) {
    MRI.setPhysRegUsed(ARM::LR);
    AFI->setLRIsSpilledForFarJump(true);
  }
}


void ARMFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const ARMBaseInstrInfo &TII =
    *static_cast<const ARMBaseInstrInfo*>(MF.getTarget().getInstrInfo());
  if (!hasReservedCallFrame(MF)) {
    // If we have alloca, convert as follows:
    //   ADJCALLSTACKDOWN -> sub, sp, sp, amount
    //   ADJCALLSTACKUP   -> add, sp, sp, amount
    MachineInstr *Old = I;
    DebugLoc dl = Old->getDebugLoc();
    unsigned Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
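      // For example, with an 8-byte stack alignment, an outgoing-argument
      // area of 10 bytes is rounded up to 16 bytes before the sp adjustment
      // is emitted.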
      unsigned Align = getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;

      ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
      assert(!AFI->isThumb1OnlyFunction() &&
             "This eliminateCallFramePseudoInstr does not support Thumb1!");
      bool isARM = !AFI->isThumbFunction();

      // Replace the pseudo instruction with a new instruction...
      unsigned Opc = Old->getOpcode();
      int PIdx = Old->findFirstPredOperandIdx();
      ARMCC::CondCodes Pred = (PIdx == -1)
        ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
      if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
        // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
        unsigned PredReg = Old->getOperand(2).getReg();
        emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, MachineInstr::NoFlags,
                     Pred, PredReg);
      } else {
        // Note: PredReg is operand 3 for ADJCALLSTACKUP.
        unsigned PredReg = Old->getOperand(3).getReg();
        assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
        emitSPUpdate(isARM, MBB, I, dl, TII, Amount, MachineInstr::NoFlags,
                     Pred, PredReg);
      }
    }
  }
  MBB.erase(I);
}