//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC), BasePtr(ARM::R6) {}

static unsigned getFramePointerReg(const ARMSubtarget &STI) {
  if (STI.isTargetMachO())
    return ARM::R7;
  else if (STI.isTargetWindows())
    return ARM::R11;
  else // ARM EABI
    return STI.isThumb() ? ARM::R7 : ARM::R11;
}

const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  bool UseSplitPush = STI.splitFramePushPop();
  const MCPhysReg *RegList =
      STI.isTargetDarwin()
          ? CSR_iOS_SaveList
          : (UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList);

  const Function *F = MF->getFunction();
  if (F->getCallingConv() == CallingConv::GHC) {
    // The GHC set of callee-saved registers is empty, as all of those
    // registers are used for passing STG registers around.
    return CSR_NoRegs_SaveList;
  } else if (F->hasFnAttribute("interrupt")) {
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow
      // a function conforming to the AAPCS to act as a handler.
      return UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList;
    } else if (F->getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  if (STI.isTargetDarwin() && STI.getTargetLowering()->supportSwiftError() &&
      F->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_iOS_SwiftError_SaveList;

  if (STI.isTargetDarwin() && F->getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
               ? CSR_iOS_CXX_TLS_PE_SaveList
               : CSR_iOS_CXX_TLS_SaveList;
  return RegList;
}

const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
    return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return CSR_NoRegs_RegMask;

  if (STI.isTargetDarwin() && STI.getTargetLowering()->supportSwiftError() &&
      MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_iOS_SwiftError_RegMask;

  if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
    return CSR_iOS_CXX_TLS_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used
  // for the first i32 argument (which must also be the register used to
  // return a single i32 return value).
  //
  // In case the calling convention does not use the same register for both,
  // or otherwise does not want to enable this optimization, the function
  // should return nullptr.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  Reserved.set(ARM::SP);
  Reserved.set(ARM::PC);
  Reserved.set(ARM::FPSCR);
  Reserved.set(ARM::APSR_NZCV);
  if (TFI->hasFP(MF))
    Reserved.set(getFramePointerReg(STI));
  if (hasBasePointer(MF))
    Reserved.set(BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    Reserved.set(ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
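  // (e.g. VFPv3-D16 implementations provide only D0-D15.)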
  if (!STI.hasVFP3() || STI.hasD16()) {
    static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
    Reserved.set(ARM::D16, ARM::D31 + 1);
  }
  const TargetRegisterClass *RC = &ARM::GPRPairRegClass;
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); I != E; ++I)
    for (MCSubRegIterator SI(*I, this); SI.isValid(); ++SI)
      if (Reserved.test(*SI))
        Reserved.set(*I);

  return Reserved;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
    case ARM::GPRPairRegClassID:
      return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass; // Can't copy CCR registers.
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID: // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
static unsigned getPairedGPR(unsigned Reg, bool Odd, const MCRegisterInfo *RI) {
  for (MCSuperRegIterator Supers(Reg, RI); Supers.isValid(); ++Supers)
    if (ARM::GPRPairRegClass.contains(*Supers))
      return RI->getSubReg(*Supers, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return 0;
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
void
ARMBaseRegisterInfo::getRegAllocationHints(unsigned VirtReg,
                                           ArrayRef<MCPhysReg> Order,
                                           SmallVectorImpl<MCPhysReg> &Hints,
                                           const MachineFunction &MF,
                                           const VirtRegMap *VRM,
                                           const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  default:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    return;
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and provide
  // the paired register as the first hint.
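  // For example (illustrative): a vreg hinted RegPairOdd whose partner vreg
  // is already assigned R4 gets R5 (its partner in the R4_R5 GPRPair) as the
  // first hint, followed by the remaining odd registers whose even partner
  // isn't reserved.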
  unsigned Paired = Hint.second;
  if (Paired == 0)
    return;

  unsigned PairedPhys = 0;
  if (TargetRegisterInfo::isPhysicalRegister(Paired)) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys &&
      std::find(Order.begin(), Order.end(), PairedPhys) != Order.end())
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (unsigned I = 0, E = Order.size(); I != E; ++I) {
    unsigned Reg = Order[I];
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    unsigned Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
}

void
ARMBaseRegisterInfo::updateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      TargetRegisterInfo::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one of the even / odd register pair and it's now changed
    // (e.g. coalesced) into a different register, the allocation hint of the
    // other register of the pair must be updated to reflect the relationship
    // change.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (TargetRegisterInfo::isVirtualRegister(NewReg))
        MRI->setRegAllocationHint(NewReg,
                                  Hint.first == (unsigned)ARMRI::RegPairOdd
                                      ? ARMRI::RegPairEven
                                      : ARMRI::RegPairOdd,
                                  OtherReg);
    }
  }
}

bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // When outgoing call frames are so large that we adjust the stack pointer
  // around the call, we can no longer use the stack pointer to reach the
  // emergency spill slot.
  if (needsStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 allows positive offsets
  // only. It's going to be better to use the SP or base pointer instead. When
  // there are variable-sized objects, we can't reference off of the SP, so we
  // reserve a base pointer.
  if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee-saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If the estimate is wrong, the scavenger will still make access work; it
    // just won't be optimal.
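    // (The 128-byte cutoff below is an illustrative reading: Thumb2 negative
    // ldr/str offsets reach -255, so a local frame under 128 bytes leaves
    // headroom for spills and callee-saved space within FP range.)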
    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
      return false;
    return true;
  }

  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
  // 3. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  if (AFI->isThumb1OnlyFunction())
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(getFramePointerReg(MF.getSubtarget<ARMSubtarget>())))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI->adjustsStack())
    return true;
  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
    || needsStackRealignment(MF);
}

unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return getFramePointerReg(STI);
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
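/// For example (illustrative), materializing the constant 0x12345678 into R0
/// emits an ARM::LDRcp pseudo, i.e. a PC-relative "ldr r0, [pc, #<cpi>]"
/// whose new constant-pool entry <cpi> holds i32 0x12345678.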
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0).addImm(Pred).addReg(PredReg)
      .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrMode3: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrModeT1_s: {
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  }
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate, based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is
  // pre-regalloc, so we don't know everything for certain yet), whether this
  // offset is likely to be out of range of the immediate. Return true if so.
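  // (Illustrative worked example: for an ARM function and an incoming
  // SP-relative offset of -48, the estimates below give
  // FPOffset = -48 - 8 - 80 = -136, and an SP-relative offset of
  // -48 + LocalFrameSize + 128.)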

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12:   case ARM::LDRH:   case ARM::LDRBi12:
  case ARM::STRi12:   case ARM::STRH:   case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS:    case ARM::VLDRD:
  case ARM::VSTRS:    case ARM::VSTRD:
  case ARM::tSTRspi:  case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable-sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using
  // it. The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg
/// to be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown".
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    AddDefaultCC(AddDefaultPred(MIB));
}

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets.
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             unsigned BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative offsets and i12 supports only positive ones,
    // so pick the appropriate encoding based on the sign of Offset.
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

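  // (Illustrative example: AddrMode5 has NumBits = 8 and Scale = 4, so a
  // word-aligned offset is legal up to 255 * 4 = 1020 bytes from BaseReg.)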
  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale - 1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo()->hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above; handle the rest by providing a register that
  // holds SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(&ARM::GPRRegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
  }
}

bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If not copying into a sub-register, this should be OK because we
  // shouldn't need to split the reg.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (NewRC->getSize() < 32 && DstRC->getSize() < 32 && SrcRC->getSize() < 32)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately we don't know yet whether we will be
  // constrained. The goal of this heuristic is to restrict how many expensive
  // registers we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
               << It->second << "\n");
  DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
               << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //   (1) addresses PR18825,
  //   (2) generates better code in some test cases (like vldm-sched-a9.ll),
  //   (3) doesn't regress any test cases (in-tree, test-suite, and SPEC).
  // In practice the SizeMultiplier will only factor in for straight-line code
  // that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size() / 100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}