X86RegisterInfo.cpp revision 74b3c8da4800c7e8ba8f019879db29738ecc5f74
//===-- X86RegisterInfo.cpp - X86 Register Information -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true),
                       (tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP)),
    TM(tm), TII(tii) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires the GOT pointer to be held in EBX before calls made through
  // the PLT.
  BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}
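// Quick reference for the fields cached by the constructor above. This
// summary is purely illustrative; the authoritative logic is the code itself:
//
//              SlotSize   StackPtr    FramePtr    BasePtr
//   64-bit        8       X86::RSP    X86::RBP    X86::RBX
//   32-bit        4       X86::ESP    X86::EBP    X86::ESI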
/// getCompactUnwindRegNum - This function maps the register to the number for
/// compact unwind encoding. Return -1 if the register isn't valid.
int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
  switch (getLLVMRegNum(RegNum, isEH)) {
  case X86::EBX: case X86::RBX: return 1;
  case X86::ECX: case X86::R12: return 2;
  case X86::EDX: case X86::R13: return 3;
  case X86::EDI: case X86::R14: return 4;
  case X86::ESI: case X86::R15: return 5;
  case X86::EBP: case X86::RBP: return 6;
  }

  return -1;
}

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // Only enable when post-RA scheduling is enabled and this is needed.
  return TM.getSubtargetImpl()->postRAScheduler();
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return 0;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass*
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const{
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}
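// Example (illustrative): GR8_ABCD_L may be inflated to the full GR8 class by
// getLargestLegalSuperClass, since both classes spill as 1 byte, whereas
// GR8_NOREX is returned unchanged by the early exit above.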
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
                                                                         const {
  const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (Subtarget.isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    else if (Subtarget.is64Bit())
      return &X86::GR64_TCRegClass;

    const Function *F = MF.getFunction();
    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const uint16_t *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;
  bool oclBiCall = false;
  bool hipeCall = false;
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
    oclBiCall = (F ? F->getCallingConv() == CallingConv::Intel_OCL_BI : false);
    hipeCall = (F ? F->getCallingConv() == CallingConv::HiPE : false);
  }

  if (ghcCall || hipeCall)
    return CSR_NoRegs_SaveList;
  if (oclBiCall) {
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
  }
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (callsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (callsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();

  if (CC == CallingConv::Intel_OCL_BI) {
    if (IsWin64 && HasAVX)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (Is64Bit && HasAVX)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
  }
  if (CC == CallingConv::GHC || CC == CallingConv::HiPE)
    return CSR_NoRegs_RegMask;
  if (!Is64Bit)
    return CSR_32_RegMask;
  if (IsWin64)
    return CSR_Win64_RegMask;
  return CSR_64_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}
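// Example (illustrative): on a 64-bit non-Windows target without AVX, a plain
// C call (CallingConv::C) falls through every special case in
// getCallPreservedMask and receives CSR_64_RegMask, while a GHC or HiPE call
// receives CSR_NoRegs_RegMask, i.e. no registers are preserved across it.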
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  for (MCSubRegIterator I(X86::RSP, this); I.isValid(); ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  for (MCSubRegIterator I(X86::RIP, this); I.isValid(); ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    Reserved.set(X86::RBP);
    for (MCSubRegIterator I(X86::RBP, this); I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t* RegMask = getCallPreservedMask(CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported "
        "with this calling convention.");

    Reserved.set(getBaseRegister());
    for (MCSubRegIterator I(getBaseRegister(), this); I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though
    // their super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      static const uint16_t GPR64[] = {
        X86::R8,  X86::R9,  X86::R10, X86::R11,
        X86::R12, X86::R13, X86::R14, X86::R15
      };
      for (MCRegAliasIterator AI(GPR64[n], this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      assert(X86::XMM15 == X86::XMM8+7);
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment and there are dynamic allocas, we can't
  // reference off of the stack pointer, so we reserve a base pointer.
  if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
    return true;

  return false;
}
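// Example (illustrative): a function whose locals require 32-byte alignment
// (above the usual 16-byte stack alignment) and which also calls alloca()
// cannot address its fixed locals off the stack pointer, so hasBasePointer
// returns true and the base pointer (RBX in 64-bit mode, ESI in 32-bit mode)
// is reserved in getReservedRegs above.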
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  if (!MF.getTarget().Options.RealignStack)
    return false;

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
  bool requiresRealignment =
    ((MFI->getMaxAlignment() > StackAlign) ||
     F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                     Attribute::StackAlignment));

  // If we've requested that we force align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}
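// Worked example (illustrative): isInt<8>(Imm) tests for a signed 8-bit
// immediate, i.e. -128 <= Imm <= 127, so:
//   getSUBriOpcode(/*is64Bit=*/true,  /*Imm=*/8)    -> X86::SUB64ri8
//   getSUBriOpcode(/*is64Bit=*/true,  /*Imm=*/128)  -> X86::SUB64ri32
//   getADDriOpcode(/*is64Bit=*/false, /*Imm=*/-128) -> X86::ADD32ri8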
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  bool reserveCallFrame = TFI->hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add.
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

    MachineInstr *New = 0;
    if (Opcode == TII.getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(Is64Bit, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == TII.getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(Is64Bit, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

  if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();

    // We are not tracking the stack pointer adjustment by the callee, so make
    // sure we restore the stack pointer immediately after the call; there may
    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
    MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !llvm::prior(I)->isCall())
      --I;
    MBB.insert(I, New);
  }
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the chosen base register and fold the frame object's
  // offset into the displacement.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from the base register.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}
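// Worked example (illustrative numbers): in eliminateCallFramePseudoInstr,
// with a 16-byte stack alignment an outgoing-argument area of 20 bytes is
// rounded up to (20 + 15) / 16 * 16 = 32, so the setup pseudo becomes
// 'sub rsp, 32' (SUB64ri8, since 32 fits in a signed 8-bit immediate).
// In eliminateFrameIndex, a frame index whose computed FIOffset is -24,
// appearing in a memory operand with an existing displacement of 8, is
// rewritten into a plain [rbp - 16]-style reference.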
unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64, High);
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    // In 64-bit mode, if a "high" register was requested (the Q and r
    // constraints), return one of the 16-bit registers below; any other
    // register falls through to the 64-bit lookup.
    if (High) {
      switch (Reg) {
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      // Fallthrough.
      }
    }
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}
}
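// Usage sketch (illustrative): getX86SubSuperRegister maps a register to the
// alias of the requested width within the same architectural register:
//   getX86SubSuperRegister(X86::EAX,  MVT::i8,  /*High=*/true)  == X86::AH
//   getX86SubSuperRegister(X86::R10B, MVT::i32, /*High=*/false) == X86::R10D
//   getX86SubSuperRegister(X86::SI,   MVT::i8,  /*High=*/false) == X86::SIL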