X86RegisterInfo.cpp revision 0ccb37a7339883e1fd090beadc2deb1ce40ea7d4
//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm)
  : X86GenRegisterInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
                          ? X86::RIP : X86::EIP),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true),
                       (tm.getSubtarget<X86Subtarget>().is64Bit()
                          ? X86::RIP : X86::EIP)),
    TM(tm) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit PIC mode
  // the GOT address must live in EBX before a call through the PLT, so EBX
  // cannot serve as the base pointer there.
  BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}

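// For quick reference, the per-mode values cached by the constructor above:
//
//                64-bit    32-bit
//     SlotSize      8         4
//     StackPtr     RSP       ESP
//     FramePtr     RBP       EBP
//     BasePtr      RBX       ESI
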
/// getCompactUnwindRegNum - This function maps the register to the number for
/// compact unwind encoding. Returns -1 if the register isn't valid.
int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
  switch (getLLVMRegNum(RegNum, isEH)) {
  case X86::EBX: case X86::RBX: return 1;
  case X86::ECX: case X86::R12: return 2;
  case X86::EDX: case X86::R13: return 3;
  case X86::EDI: case X86::R14: return 4;
  case X86::ESI: case X86::R15: return 5;
  case X86::EBP: case X86::RBP: return 6;
  }

  return -1;
}

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // ExeDepsFixer and PostRAScheduler require liveness.
  return true;
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return 0;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

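// For illustration only (hypothetical caller, not part of this file):
// allocator-side users of getLargestLegalSuperClass() do roughly
//
//   const TargetRegisterClass *RC = MRI.getRegClass(Reg);
//   const TargetRegisterClass *Super = TRI->getLargestLegalSuperClass(RC);
//   if (Super != RC)
//     MRI.setRegClass(Reg, Super); // Inflate to give the allocator freedom.
//
// This is why GR8_NOREX must refuse inflation: in 64-bit mode a copy from an
// H register (AH/BH/CH/DH) to a REX-requiring GR8 register is unencodable.
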
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (Subtarget.isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    else if (Subtarget.is64Bit())
      return &X86::GR64_TCRegClass;

    const Function *F = MF.getFunction();
    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const uint16_t *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;

  case CallingConv::WebKit_JS:
    return CSR_64_SaveList;
  case CallingConv::AnyReg:
    return CSR_MostRegs_64_SaveList;

  case CallingConv::Intel_OCL_BI: {
    bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
    bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512();
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }

  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_MostRegs_64_SaveList;
    break;

  default:
    break;
  }

  bool CallsEHReturn = MF->getMMI().callsEHReturn();
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

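// Note: the CSR_*_SaveList tables above (and the CSR_*_RegMask variants used
// below) are generated by TableGen from X86CallingConv.td. As a point of
// reference (paraphrasing the .td, not this file): CSR_64 covers the SysV
// x86-64 callee-saved GPRs RBX, R12-R15, and RBP, while CSR_Win64 additionally
// saves RSI, RDI, and XMM6-XMM15.
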
const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
  bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512();

  if (CC == CallingConv::Intel_OCL_BI) {
    if (IsWin64 && HasAVX512)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (Is64Bit && HasAVX512)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (IsWin64 && HasAVX)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (Is64Bit && HasAVX)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
  }
  if (CC == CallingConv::GHC || CC == CallingConv::HiPE)
    return CSR_NoRegs_RegMask;
  if (CC == CallingConv::WebKit_JS || CC == CallingConv::AnyReg)
    return CSR_MostRegs_64_RegMask;
  if (!Is64Bit)
    return CSR_32_RegMask;
  if (CC == CallingConv::Cold)
    return CSR_MostRegs_64_RegMask;
  if (IsWin64)
    return CSR_Win64_RegMask;
  return CSR_64_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
         ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t* RegMask = getCallPreservedMask(CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported "
        "with this calling convention.");

    for (MCSubRegIterator I(getBaseRegister(), this, /*IncludeSelf=*/true);
         I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though
    // their super-registers are legacy 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
  if (!Is64Bit || !TM.getSubtarget<X86Subtarget>().hasAVX512()) {
    for (unsigned n = 16; n != 32; ++n) {
      for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

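// For illustration only (hypothetical snippet, not part of this file): once
// the reserved set computed above has been frozen, passes normally query it
// through MachineRegisterInfo rather than calling getReservedRegs() again:
//
//   const MachineRegisterInfo &MRI = MF.getRegInfo();
//   if (MRI.isReserved(X86::RIP)) {
//     // RIP is always reserved above, so this branch is taken.
//   }
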
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment and there are dynamic allocas, we can't
  // reference off of the stack pointer, so we reserve a base pointer.
  //
  // This is also true if the function contains MS-style inline assembly. We
  // do this because if any stack changes occur in the inline assembly, e.g.,
  // "pusha", then any C local variable or C argument references in the
  // inline assembly will be wrong because the SP is not properly tracked.
  if ((needsStackRealignment(MF) && MFI->hasVarSizedObjects()) ||
      MF.hasMSInlineAsm())
    return true;

  return false;
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
    return false;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
  bool requiresRealignment =
    ((MFI->getMaxAlignment() > StackAlign) ||
     F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                     Attribute::StackAlignment));

  // If we've requested that we force-align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

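// X86 memory references are modeled as five consecutive MachineOperands:
//
//   [FIOperandNum + 0]  base register (initially a frame index)
//   [FIOperandNum + 1]  scale amount
//   [FIOperandNum + 2]  index register
//   [FIOperandNum + 3]  displacement (immediate or symbolic)
//   [FIOperandNum + 4]  segment register
//
// eliminateFrameIndex() below rewrites operand FIOperandNum to a concrete
// base register and folds the frame object's offset into the displacement.
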
void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a five-operand memory reference. Replace the
  // FrameIndex operand with the base register chosen above.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from the base register.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

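// For illustration (hypothetical calls, not part of this file), the mapping
// implemented by the switch tables below behaves like:
//
//   getX86SubSuperRegister(X86::RAX, MVT::i8, /*High=*/true) == X86::AH
//   getX86SubSuperRegister(X86::AH,  MVT::i32)               == X86::EAX
//   getX86SubSuperRegister(X86::R9D, MVT::i64)               == X86::R9
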
namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}

unsigned get512BitSuperRegister(unsigned Reg) {
  if (Reg >= X86::XMM0 && Reg <= X86::XMM31)
    return X86::ZMM0 + (Reg - X86::XMM0);
  if (Reg >= X86::YMM0 && Reg <= X86::YMM31)
    return X86::ZMM0 + (Reg - X86::YMM0);
  if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31)
    return Reg;
  llvm_unreachable("Unexpected SIMD register");
}

}
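
// For illustration (hypothetical calls, not part of this file):
// get512BitSuperRegister(X86::XMM5) and get512BitSuperRegister(X86::YMM5)
// both return X86::ZMM5, since XMM/YMM/ZMM registers with the same index
// alias the same 512-bit register.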