//===-- X86RegisterInfo.cpp - X86 Register Information -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(const Triple &TT)
    : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
                         X86_MC::getDwarfRegFlavour(TT, false),
                         X86_MC::getDwarfRegFlavour(TT, true),
                         (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  Is64Bit = TT.isArch64Bit();
  IsWin64 = Is64Bit && TT.isOSWindows();

  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit PIC mode,
  // EBX must hold the GOT address before function calls made through the PLT.
  if (Is64Bit) {
    SlotSize = 8;
    // This matches the simplified 32-bit pointer code in the data layout
    // computation.
    // FIXME: Should use the data layout?
    bool Use64BitReg = TT.getEnvironment() != Triple::GNUX32;
    StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
    FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
    BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
    BasePtr = X86::ESI;
  }
}

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // ExeDepsFixer and PostRAScheduler require liveness.
  return true;
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return nullptr;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                           const MachineFunction &MF) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}
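
// getPointerRegClass - Return a register class that can hold pointer values
// of the given Kind: 0 selects the plain GPR class, 1 the GPR class without
// the stack pointer (needed for encoding reasons), and 2 the classes that are
// safe to use for tail-call addresses.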
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (IsWin64)
      return &X86::GR64_TCW64RegClass;
    else if (Is64Bit)
      return &X86::GR64_TCRegClass;

    const Function *F = MF.getFunction();
    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return Is64Bit ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "MachineFunction required");
  const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();
  bool CallsEHReturn = MF->getMMI().callsEHReturn();

  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  case CallingConv::X86_64_Win64:
    return CSR_Win64_SaveList;
  case CallingConv::X86_64_SysV:
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  default:
    break;
  }

  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

const uint32_t *
X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                      CallingConv::ID CC) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  default:
    break;
  case CallingConv::X86_64_Win64:
    return CSR_Win64_RegMask;
  case CallingConv::X86_64_SysV:
    return CSR_64_RegMask;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_RegMask;
    return CSR_64_RegMask;
  }
  return CSR_32_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
         ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(MF, CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
          "Stack realignment in presence of dynamic allocas is not supported "
          "with this calling convention.");

    unsigned BasePtr = getX86SubSuperRegister(getBaseRegister(), MVT::i64,
                                              false);
    for (MCSubRegIterator I(BasePtr, this, /*IncludeSelf=*/true);
         I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though
    // their super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
  if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
    for (unsigned n = 16; n != 32; ++n) {
      for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment, we can't address the stack from the frame
  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
  // can't address variables from the stack pointer. MS inline asm can
  // reference locals while also adjusting the stack pointer. When we can't
  // use both the SP and the FP, we need a separate base pointer register.
  bool CantUseFP = needsStackRealignment(MF);
  bool CantUseSP =
      MFI->hasVarSizedObjects() || MFI->hasInlineAsmWithSPAdjust();
  return CantUseFP && CantUseSP;
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
    return false;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign =
      MF.getSubtarget().getFrameLowering()->getStackAlignment();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                              F->hasFnAttribute(Attribute::StackAlignment));

  // If we've requested that we force-align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  // Since X86 defines assignCalleeSavedSpillSlots, which always returns true,
  // this function is neither used nor tested.
  llvm_unreachable("Unused function on X86. Otherwise need a test case.");
}
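
// eliminateFrameIndex - Rewrite the frame-index operand of MI into a real
// base register plus displacement, picking the base pointer, frame pointer or
// stack pointer as the base depending on how this function's frame is laid
// out.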
void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // FRAME_ALLOC uses a single offset, with no register. It only works in the
  // simple FP case, and doesn't work with stack realignment. On 32-bit, the
  // offset is from the traditional base pointer location. On 64-bit, the
  // offset is from the SP at the end of the prologue, not the FP location.
  // This matches the behavior of llvm.frameaddress.
  if (Opc == TargetOpcode::FRAME_ALLOC) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
    int Offset;
    if (IsWinEH)
      Offset = static_cast<const X86FrameLowering *>(TFI)
                   ->getFrameIndexOffsetFromSP(MF, FrameIndex);
    else
      Offset = TFI->getFrameIndexOffset(MF, FrameIndex);
    FI.ChangeToImmediate(Offset);
    return;
  }

  // For LEA64_32r, when the base pointer is 32-bit (X32) we can use the
  // full-size 64-bit register as the source operand: the semantics are the
  // same and the destination is still 32-bit. This saves one byte per LEA
  // since the 0x67 address-size prefix is avoided.
  if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
    BasePtr = getX86SubSuperRegister(BasePtr, MVT::i64, false);

  // This must be part of a four-operand memory reference. Replace the
  // FrameIndex with the base register; the frame object offset is folded into
  // the displacement below.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from the base register.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (BasePtr == StackPtr)
    FIOffset += SPAdj;

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
                      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}
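
// getFrameRegister - Return the register used as the base for stack-frame
// references in this function: the frame pointer when one is present,
// otherwise the stack pointer.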
unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned
X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  unsigned FrameReg = getFrameRegister(MF);
  if (Subtarget.isTarget64BitILP32())
    FrameReg = getX86SubSuperRegister(FrameReg, MVT::i32, false);
  return FrameReg;
}

namespace llvm {
// getX86SubSuperRegister - Return the register in the same register family as
// Reg that has the size given by VT. For VT == MVT::i8 with High set, the
// 8-bit high registers (AH, BH, CH, DH) are returned for the A/B/C/D
// families.
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}

// get512BitSuperRegister - Return the 512-bit ZMM register that aliases the
// given XMM, YMM or ZMM register.
unsigned get512BitSuperRegister(unsigned Reg) {
  if (Reg >= X86::XMM0 && Reg <= X86::XMM31)
    return X86::ZMM0 + (Reg - X86::XMM0);
  if (Reg >= X86::YMM0 && Reg <= X86::YMM31)
    return X86::ZMM0 + (Reg - X86::YMM0);
  if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31)
    return Reg;
  llvm_unreachable("Unexpected SIMD register");
}

} // namespace llvm