// X86RegisterInfo.cpp, revision 6f0d024a534af18d9e60b3ea757376cd8a3a980e
//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(X86::ADJCALLSTACKDOWN, X86::ADJCALLSTACKUP),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  StackAlign = TM.getFrameInfo()->getStackAlignment();
  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

// getDwarfRegNum - This function maps LLVM register identifiers to the
// DWARF-specific numbering, used in debug info and exception tables.

int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;
  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Not supported yet; fall back to the generic numbering for now.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}
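// Editor's note (illustrative, not part of the original source): the flavours
// above exist because the same physical register can carry different numbers
// in different tables. For instance, on 32-bit Darwin the eh_frame register
// numbering historically swaps ESP and EBP relative to the generic 32-bit
// DWARF numbering, which is why X86_32_DarwinEH is selected only when isEH
// is true.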
// getX86RegNum - This function maps LLVM register identifiers to their X86
// specific numbering, which is used in various places when encoding
// instructions.
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) const {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15: case X86::MM7:
    return 7;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    assert(0 && "Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass)
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  return NULL;
}
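// Editor's note (illustrative): the numbers returned by getX86RegNum above
// are the 3-bit fields that go into ModRM/SIB bytes, so R8-R15 reuse the
// values 0-7, e.g.
//   getX86RegNum(X86::R9D) == N86::ECX == 1
// and the encoder distinguishes the two banks with a REX prefix bit
// (REX.B/R/X). Likewise AH/CH/DH/BH share the encodings 4-7 with
// SPL/BPL/SIL/DIL, which is why they appear in the same case labels.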
void X86RegisterInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    const MachineInstr *Orig) const {
  // MOV8r0, MOV16r0, MOV32r0, etc. are implemented with xor, which clobbers
  // the condition codes. Re-materialize them as movri instructions to avoid
  // that side effect.
  switch (Orig->getOpcode()) {
  case X86::MOV8r0:
    BuildMI(MBB, I, TII.get(X86::MOV8ri), DestReg).addImm(0);
    break;
  case X86::MOV16r0:
    BuildMI(MBB, I, TII.get(X86::MOV16ri), DestReg).addImm(0);
    break;
  case X86::MOV32r0:
    BuildMI(MBB, I, TII.get(X86::MOV32ri), DestReg).addImm(0);
    break;
  case X86::MOV64r0:
    BuildMI(MBB, I, TII.get(X86::MOV64ri32), DestReg).addImm(0);
    break;
  default: {
    MachineInstr *MI = Orig->clone();
    MI->getOperand(0).setReg(DestReg);
    MBB.insert(I, MI);
    break;
  }
  }
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  if (Is64Bit)
    return CalleeSavedRegs64Bit;
  else {
    if (MF) {
      MachineFrameInfo *MFI = MF->getFrameInfo();
      MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
      if (MMI && MMI->callsEHReturn())
        return CalleeSavedRegs32EHRet;
    }
    return CalleeSavedRegs32Bit;
  }
}

const TargetRegisterClass* const*
X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
  static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };

  if (Is64Bit)
    return CalleeSavedRegClasses64Bit;
  else {
    if (MF) {
      MachineFrameInfo *MFI = MF->getFrameInfo();
      MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
      if (MMI && MMI->callsEHReturn())
        return CalleeSavedRegClasses32EHRet;
    }
    return CalleeSavedRegClasses32Bit;
  }
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  // The stack pointer (in all its widths) is always reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);
  // The frame pointer is reserved only when the function actually uses one.
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }
  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
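// Editor's sketch (assumed typical 32-bit frame layout; illustrative, not
// taken verbatim from this revision):
//
//   [ incoming arguments      ]   higher addresses
//   [ return address          ]   <- ESP at function entry
//   [ saved EBP (if hasFP)    ]   <- EBP after the prologue's "mov ebp, esp"
//   [ callee-saved pushes     ]
//   [ local stack objects     ]   <- ESP after "sub esp, NumBytes"
//
// The routines below create and destroy this layout and rewrite abstract
// frame indices into [EBP+offset] or [ESP+offset] references.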
// hasFP - Return true if the specified function should have a dedicated frame
// pointer register. This is true if the function has variable sized allocas
// or if frame pointer elimination is disabled.
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();

  return (NoFramePointerElim ||
          MFI->hasVarSizedObjects() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          (MMI && MMI->callsUnwindInit()));
}

bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into an 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add.
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;

      MachineInstr *New = 0;
      if (Old->getOpcode() == X86::ADJCALLSTACKDOWN) {
        New=BuildMI(TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      } else {
        assert(Old->getOpcode() == X86::ADJCALLSTACKUP);
        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;
        if (Amount) {
          unsigned Opc = (Amount < 128) ?
            (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
            (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
          New = BuildMI(TII.get(Opc), StackPtr).addReg(StackPtr).addImm(Amount);
        }
      }

      // Replace the pseudo instruction with a new instruction...
      if (New) MBB.insert(I, New);
    }
  } else if (I->getOpcode() == X86::ADJCALLSTACKUP) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = (CalleeAmt < 128) ?
        (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
        (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
      MachineInstr *New =
        BuildMI(TII.get(Opc), StackPtr).addReg(StackPtr).addImm(CalleeAmt);
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}
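// Editor's note (worked example, values assumed for illustration): with
// StackAlign == 16 and a call needing Amount == 20 bytes of outgoing
// arguments, the rounding in eliminateCallFramePseudoInstr above gives
//   Amount = (20 + 16 - 1) / 16 * 16 == 32
// so ESP stays 16-byte aligned across the call sequence.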
void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                          int SPAdj, RegScavenger *RS) const{
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  while (!MI.getOperand(i).isFrameIndex()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register (EBP if we have a frame pointer,
  // ESP otherwise), and fold the frame object's offset into the existing
  // offset operand.
  MI.getOperand(i).ChangeToRegister(hasFP(MF) ? FramePtr : StackPtr, false);

  // Now add the frame object offset to the offset from EBP.
  int64_t Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
                   MI.getOperand(i+3).getImm()+SlotSize;

  if (!hasFP(MF))
    Offset += MF.getFrameInfo()->getStackSize();
  else {
    Offset += SlotSize;  // Skip the saved EBP.
    // Skip the RETADDR move area.
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0) Offset -= TailCallReturnAddrDelta;
  }

  MI.getOperand(i+3).ChangeToImmediate(Offset);
}

void
X86RegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF) const{
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0) {
    // Create the RETURNADDR area:
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MF.getFrameInfo()->
      CreateFixedObject(-TailCallReturnAddrDelta,
                        (-1*SlotSize)+TailCallReturnAddrDelta);
  }
  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize,
                                                        (int)SlotSize * -2+
                                                        TailCallReturnAddrDelta);
    assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
  }
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub
    ? ((Offset < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
    : ((Offset < 128) ?
       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
  uint64_t Chunk = (1LL << 31) - 1;

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    BuildMI(MBB, MBBI, TII.get(Opc), StackPtr).addReg(StackPtr).addImm(ThisVal);
    Offset -= ThisVal;
  }
}

// mergeSPUpdatesUp - Merge a stack adjustment made by the instruction
// immediately *before* the given iterator into *NumBytes, deleting that
// instruction.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}
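// Editor's note (illustrative, hypothetical sequence): the net effect of the
// merge helpers is to fold adjacent stack adjustments into one, e.g.
//
//   sub esp, 12      ; leftover adjustment found next to the update
//   sub esp, 20      ; update under construction (NumBytes == 20)
//
// becomes a single "sub esp, 32", because the 12 is folded into *NumBytes
// and the old instruction is erased before emitSPUpdate runs.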
// mergeSPUpdatesDown - Merge a stack adjustment made by the instruction
// immediately *after* the given iterator into *NumBytes, deleting that
// instruction.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // Note: this early return disables the downward merge entirely; the rest
  // of the function is currently unreachable.
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Check the instruction before/after the passed
/// instruction. If it is an ADD or SUB of the stack pointer, it is deleted
/// and the stack adjustment is returned as a positive value for ADD and a
/// negative value for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {

  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  int Offset = 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : next(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr){
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();   // Prologue goes in the entry BB.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function* Fn = MF.getFunction();
  const X86Subtarget* Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();

  // Prepare for frame info.
  unsigned FrameLabelId = 0;

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  // Add the RETADDR move area to the callee-saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() +(-TailCallReturnAddrDelta));
  uint64_t NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();

  // Insert a stack pointer adjustment for later moving of the return addr.
  // This only applies to tail call optimized functions where the callee
  // argument stack size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    BuildMI(MBB, MBBI, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
            StackPtr).addReg(StackPtr).addImm(-TailCallReturnAddrDelta);
  }

  if (hasFP(MF)) {
    // Get the offset of the stack slot for the EBP register... which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(SlotSize-NumBytes);

    // Save EBP into the appropriate stack slot...
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr);
    NumBytes -= SlotSize;

    if (MMI && MMI->needsFrameInfo()) {
      // Mark the effective beginning of when the frame pointer becomes valid.
      FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, TII.get(X86::LABEL)).addImm(FrameLabelId).addImm(0);
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);
  }

  unsigned ReadyLabelId = 0;
  if (MMI && MMI->needsFrameInfo()) {
    // Mark the effective beginning of when the frame pointer is ready.
    ReadyLabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, TII.get(X86::LABEL)).addImm(ReadyLabelId).addImm(0);
  }

  // Skip the callee-saved push instructions.
  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r))
    ++MBBI;

  if (NumBytes) {   // Adjust stack pointer: ESP -= numbytes.
    if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
      // Check whether EAX is live-in for this function.
      bool isEAXAlive = false;
      for (MachineRegisterInfo::livein_iterator
           II = MF.getRegInfo().livein_begin(),
           EE = MF.getRegInfo().livein_end(); (II != EE) && !isEAXAlive; ++II) {
        unsigned Reg = II->first;
        isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                      Reg == X86::AH || Reg == X86::AL);
      }

      // The function prologue calls _alloca to probe the stack when
      // allocating more than 4K bytes in one go. Touching the stack at 4K
      // increments is necessary to ensure that the guard pages used by the
      // OS virtual memory manager are allocated in the correct sequence.
      if (!isEAXAlive) {
        BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
        BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
          .addExternalSymbol("_alloca");
      } else {
        // Save EAX.
        BuildMI(MBB, MBBI, TII.get(X86::PUSH32r), X86::EAX);
        // Allocate NumBytes-4 bytes on the stack. We'll also use the 4
        // already allocated bytes for EAX.
        BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4);
        BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
          .addExternalSymbol("_alloca");
        // Restore EAX.
        MachineInstr *MI = addRegOffset(BuildMI(TII.get(X86::MOV32rm),X86::EAX),
                                        StackPtr, NumBytes-4);
        MBB.insert(MBBI, MI);
      }
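      // Editor's note (worked example, sizes assumed): for NumBytes == 12288
      // the _alloca call touches a word in each of the three 4K pages, so the
      // OS can grow the guard-page region one page at a time instead of
      // faulting on a single store far beyond the committed stack.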
    } else {
      // If there is a SUB32ri of ESP immediately before this instruction,
      // merge the two. This can be the case when tail call elimination is
      // enabled and the callee has more arguments than the caller.
      NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);
      // If there is an ADD32ri or SUB32ri of ESP immediately after this
      // instruction, merge the two instructions.
      mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

      if (NumBytes)
        emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
    }
  }

  if (MMI && MMI->needsFrameInfo()) {
    std::vector<MachineMove> &Moves = MMI->getFrameMoves();
    const TargetData *TD = MF.getTarget().getTargetData();

    // Calculate the number of bytes used for storing the return address.
    int stackGrowth =
      (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
       TargetFrameInfo::StackGrowsUp ?
       TD->getPointerSize() : -TD->getPointerSize());

    if (StackSize) {
      // Show the update of SP.
      if (hasFP(MF)) {
        // Adjust SP.
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2*stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      } else {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -StackSize+stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      }
    } else {
      // FIXME: Verify & implement for FP.
      MachineLocation SPDst(StackPtr);
      MachineLocation SPSrc(StackPtr, stackGrowth);
      Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
    }

    // Add the callee-saved registers to the move list.
    const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();

    // FIXME: This is a dirty hack. The code itself is a mess right now.
    // It should be rewritten from scratch and generalized someday.

    // Determine the maximum offset (minimum due to stack growth).
    int64_t MaxOffset = 0;
    for (unsigned I = 0, E = CSI.size(); I!=E; ++I)
      MaxOffset = std::min(MaxOffset,
                           MFI->getObjectOffset(CSI[I].getFrameIdx()));

    // Calculate the offsets.
    int64_t saveAreaOffset = (hasFP(MF) ? 3 : 2)*stackGrowth;
    for (unsigned I = 0, E = CSI.size(); I!=E; ++I) {
      int64_t Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
      unsigned Reg = CSI[I].getReg();
      Offset = (MaxOffset-Offset+saveAreaOffset);
      MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
      MachineLocation CSSrc(Reg);
      Moves.push_back(MachineMove(FrameLabelId, CSDst, CSSrc));
    }
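    // Editor's note (background, not from this revision): each
    // MachineMove(LabelId, Dst, Src) records "at the point marked by LabelId,
    // location Dst can be recovered from Src"; MachineModuleInfo later lowers
    // these entries into DWARF frame information for debuggers and unwinders.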
    if (hasFP(MF)) {
      // Save FP.
      MachineLocation FPDst(MachineLocation::VirtualFP, 2*stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(ReadyLabelId, FPDst, FPSrc));
    }

    MachineLocation FPDst(hasFP(MF) ? FramePtr : StackPtr);
    MachineLocation FPSrc(MachineLocation::VirtualFP);
    Moves.push_back(MachineMove(ReadyLabelId, FPDst, FPSrc));
  }

  // If it's main() on Cygwin/MinGW32 we should align the stack as well.
  if (Fn->hasExternalLinkage() && Fn->getName() == "main" &&
      Subtarget->isTargetCygMing()) {
    BuildMI(MBB, MBBI, TII.get(X86::AND32ri), X86::ESP)
      .addReg(X86::ESP).addImm(-StackAlign);

    // Probe the stack.
    BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(StackAlign);
    BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
  }
}
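// Editor's note (assumed rationale): the main() special case exists because
// the Cygwin/MinGW startup code only guarantees minimal (word) stack
// alignment at entry, so main() re-aligns ESP itself with the
// "and esp, -StackAlign" above before any alignment-sensitive locals are
// used.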
void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function* Fn = MF.getFunction();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  const X86Subtarget* Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();

  switch (RetOpcode) {
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNri64:
  case X86::TCRETURNdi64:
  case X86::EH_RETURN:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm: break;  // These are ok.
  default:
    assert(0 && "Can only insert epilog into returning blocks");
  }

  // Get the number of bytes to deallocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = StackSize - CSSize;

  if (hasFP(MF)) {
    // Pop EBP.
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
    NumBytes -= SlotSize;
  }

  // Skip the callee-saved pop instructions.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();
    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;
    --MBBI;
  }

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, reset ESP to point to the last callee-saved
  // slot before popping them off! Also, if it's main() on Cygwin/MinGW32 and
  // we aligned the stack in the prologue, revert the stack changes here.
  // Note: we're assuming that the frame pointer was forced for main().
  if (MFI->hasVarSizedObjects() ||
      (Fn->hasExternalLinkage() && Fn->getName() == "main" &&
       Subtarget->isTargetCygMing())) {
    unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
    if (CSSize) {
      MachineInstr *MI = addRegOffset(BuildMI(TII.get(Opc), StackPtr),
                                      FramePtr, -CSSize);
      MBB.insert(MBBI, MI);
    } else
      BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
              StackPtr).addReg(FramePtr);

    NumBytes = 0;
  }

  // Adjust the stack pointer back: ESP += numbytes.
  if (NumBytes)
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);

  // We're returning from the function via eh_return.
  if (RetOpcode == X86::EH_RETURN) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isRegister() && "Offset should be in register!");
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  // Tail call return: adjust the stack pointer and jump to the callee.
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode== X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64) {
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(1);
    assert(StackAdjust.isImmediate() && "Expecting immediate value.");

    // Adjust the stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
    // Incorporate the retaddr area.
    Offset = StackAdj-MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");
    if (Offset) {
      // Check for a possible merge with the preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }
    // Jump to a label or to a value in a register.
    if (RetOpcode == X86::TCRETURNdi|| RetOpcode == X86::TCRETURNdi64)
      BuildMI(MBB, MBBI, TII.get(X86::TAILJMPd)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
    else if (RetOpcode== X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
    } else
      BuildMI(MBB, MBBI, TII.get(X86::TAILJMPr), JumpTarget.getReg());
    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());
    // Check for a possible merge with the preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  if (Is64Bit)
    return X86::RIP;  // Should have dwarf #16.
  else
    return X86::EIP;  // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

int
X86RegisterInfo::getFrameIndexOffset(MachineFunction &MF, int FI) const {
  int Offset = MF.getFrameInfo()->getObjectOffset(FI) + SlotSize;
  if (!hasFP(MF))
    return Offset + MF.getFrameInfo()->getStackSize();

  Offset += SlotSize;  // Skip the saved EBP.
  // Skip the RETADDR move area.
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0) Offset -= TailCallReturnAddrDelta;
  return Offset;
}
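// Editor's note (worked example, values assumed): in 32-bit mode with no
// frame pointer, an object whose ObjectOffset is -8 in a function with
// StackSize == 16 yields
//   Offset = -8 + 4 (SlotSize, skipping the return address) + 16 == 12
// i.e. the object lives at [esp+12] after the prologue's allocation.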
void X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves)
                                                                        const {
  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = (Is64Bit ? -8 : -4);

  // The initial state of the frame pointer is esp+4.
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add the return address to the move list.
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  assert(0 && "What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  assert(0 && "What is the exception handler register");
  return 0;
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::ValueType VT, bool High) {
  switch (VT) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
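    // Editor's note (x86-64 encoding detail, for context): the High == true
    // mapping only works for the legacy A/B/C/D registers, because AH, CH,
    // DH and BH are not encodable in any instruction carrying a REX prefix;
    // that is why the High path returns 0 rather than a high-byte alias for
    // SIL/DIL/BPL/SPL and R8B-R15B.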
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"
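// Editor's note (illustrative usage of getX86SubSuperRegister above):
//   getX86SubSuperRegister(X86::RAX, MVT::i8, true)   == X86::AH
//   getX86SubSuperRegister(X86::AL,  MVT::i64, false) == X86::RAX
//   getX86SubSuperRegister(X86::R9,  MVT::i16, false) == X86::R9W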