X86RegisterInfo.cpp revision dce4a407a24b04eebc6a376f8e62b41aaa7b071f
//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm)
    : X86GenRegisterInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
                              ? X86::RIP : X86::EIP),
                         X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false),
                         X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true),
                         (tm.getSubtarget<X86Subtarget>().is64Bit()
                              ? X86::RIP : X86::EIP)),
      TM(tm) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires the GOT pointer to live in EBX across function calls made via
  // the PLT.
  BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}
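// Recap of the mode-dependent state cached by the constructor above (derived
// directly from the code; shown here for quick reference):
//
//            SlotSize  StackPtr  FramePtr  BasePtr
//   64-bit      8        RSP       RBP       RBX
//   32-bit      4        ESP       EBP       ESI
//
// The two cl::opt flags above are ordinary llc options, so (for example)
// "llc -x86-use-base-pointer=false" disables the base-pointer machinery and
// "llc -force-align-stack" makes needsStackRealignment() consider every
// function, subject only to canRealignStack().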
/// getCompactUnwindRegNum - This function maps the register to the number for
/// compact unwind encoding. Return -1 if the register isn't valid.
int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
  switch (getLLVMRegNum(RegNum, isEH)) {
  case X86::EBX: case X86::RBX: return 1;
  case X86::ECX: case X86::R12: return 2;
  case X86::EDX: case X86::R13: return 3;
  case X86::EDI: case X86::R14: return 4;
  case X86::ESI: case X86::R15: return 5;
  case X86::EBP: case X86::RBP: return 6;
  }

  return -1;
}

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // ExeDepsFixer and PostRAScheduler require liveness.
  return true;
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return nullptr;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass*
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const{
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would change the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}
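// Illustrative sketch (not part of the original file; "TRI" is a hypothetical
// TargetRegisterInfo pointer): why the spill-size check above matters. The
// float and vector classes overlap in registers but not in spill-slot size
// (e.g. FR32 spills 4 bytes while VR128 spills 16), so register-class
// inflation must never cross such a boundary:
#if 0
  const TargetRegisterClass *RC = &X86::FR32RegClass;
  const TargetRegisterClass *Super = TRI->getLargestLegalSuperClass(RC);
  // Whatever super-class comes back, its spill size matches the original's.
  assert(Super->getSize() == RC->getSize() && "inflation kept the spill size");
#endif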
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
                                                                         const {
  const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (Subtarget.isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    else if (Subtarget.is64Bit())
      return &X86::GR64_TCRegClass;

    const Function *F = MF.getFunction();
    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
  bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512();

  assert(MF && "MachineFunction required");
  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  default:
    break;
  }

  bool CallsEHReturn = MF->getMMI().callsEHReturn();
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}
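// Worked example (the SaveList contents live in the TableGen-generated
// calling-convention tables, not in this file; the register list below is the
// standard SysV x86-64 callee-saved set, stated here for illustration): a
// plain C function on 64-bit Linux matches none of the special cases above
// and falls through to CSR_64_SaveList, i.e. RBX, R12, R13, R14, R15 and RBP.
// GHC and HiPE get CSR_NoRegs_SaveList because those conventions treat every
// register as caller-saved.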
const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
  bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  default:
    break;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_RegMask;
    return CSR_64_RegMask;
  }
  return CSR_32_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}
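// Illustrative sketch (assumes the standard LLVM regmask convention, as
// implemented by MachineOperand::clobbersPhysReg): a call-preserved mask is a
// bit vector with one bit per physical register, where a SET bit means the
// register is PRESERVED across the call. A clobber test therefore looks like:
#if 0
static bool isClobberedByCall(const uint32_t *RegMask, unsigned PhysReg) {
  return !(RegMask[PhysReg / 32] & (1u << (PhysReg % 32)));
}
#endif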
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
         ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported"
        " with this calling convention.");

    for (MCSubRegIterator I(getBaseRegister(), this, /*IncludeSelf=*/true);
         I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though their
    // super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
  if (!Is64Bit || !TM.getSubtarget<X86Subtarget>().hasAVX512()) {
    for (unsigned n = 16; n != 32; ++n) {
      for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment, we can't address the stack from the frame
  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
  // can't address variables from the stack pointer. MS inline asm can
  // reference locals while also adjusting the stack pointer. When we can't
  // use both the SP and the FP, we need a separate base pointer register.
  bool CantUseFP = needsStackRealignment(MF);
  bool CantUseSP =
    MFI->hasVarSizedObjects() || MFI->hasInlineAsmWithSPAdjust();
  return CantUseFP && CantUseSP;
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
    return false;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);
  return true;
}
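// Example (hypothetical C source, for illustration only; "use" is a stand-in
// callee): a function that trips both conditions checked by hasBasePointer().
// The over-aligned local forces stack realignment, so the frame pointer no
// longer addresses locals at fixed offsets, and the dynamic alloca moves the
// stack pointer at runtime, so a third register must anchor the frame:
#if 0
void needs_base_pointer(int n) {
  __attribute__((aligned(64))) char big[64]; // MaxAlignment > StackAlign
  char *p = (char *)__builtin_alloca(n);     // variable-sized object
  use(big, p);
}
#endif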
bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
  bool requiresRealignment =
    ((MFI->getMaxAlignment() > StackAlign) ||
     F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                     Attribute::StackAlignment));

  // If we've requested that we force align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a five-operand memory reference (base, scale, index,
  // displacement, segment). Replace the FrameIndex operand with the chosen
  // base register.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now compute the frame object offset to fold into the displacement.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}
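// Worked example (instruction and offsets are hypothetical, in MIR-style
// notation): for a frame-pointer-based function where object fi#0 sits at
// FP-16, eliminateFrameIndex() rewrites the frame-index operand to the base
// register and folds the object offset into the displacement:
//
//   before:  MOV32mr <fi#0>, 1, %noreg, 4, %noreg, %eax
//   after:   MOV32mr %ebp, 1, %noreg, -12, %noreg, %eax
//
// i.e. FIOffset (-16) plus the existing immediate (4) yields -12.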
namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}

unsigned get512BitSuperRegister(unsigned Reg) {
  if (Reg >= X86::XMM0 && Reg <= X86::XMM31)
    return X86::ZMM0 + (Reg - X86::XMM0);
  if (Reg >= X86::YMM0 && Reg <= X86::YMM31)
    return X86::ZMM0 + (Reg - X86::YMM0);
  if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31)
    return Reg;
  llvm_unreachable("Unexpected SIMD register");
}

} // namespace llvm
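// Usage examples for the two helpers above (each follows directly from the
// switch tables; shown as reference assertions):
//
//   getX86SubSuperRegister(X86::RAX, MVT::i8, /*High=*/true) == X86::AH
//   getX86SubSuperRegister(X86::AH, MVT::i64)                == X86::RAX
//   getX86SubSuperRegister(X86::R9D, MVT::i16)               == X86::R9W
//   get512BitSuperRegister(X86::XMM5)                        == X86::ZMM5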