X86RegisterInfo.cpp revision c5b7a4223d4d91abbfd98f016f2f173ce181003e
//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameLowering()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Unsupported for now; fall back to the generic numbering.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}
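// Background on the flavours above (a note, not in the original source):
// Darwin's i386 eh_frame encoding uses a register numbering in which ESP and
// EBP are swapped relative to the generic i386 DWARF numbering, which is why
// exception tables (isEH) and debug info need distinct register maps on
// 32-bit Darwin.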
/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch (RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8:
  case X86::YMM0: case X86::YMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9:
  case X86::YMM1: case X86::YMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10:
  case X86::YMM2: case X86::YMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11:
  case X86::YMM3: case X86::YMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12:
  case X86::YMM4: case X86::YMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13:
  case X86::YMM5: case X86::YMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14:
  case X86::YMM6: case X86::YMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15:
  case X86::YMM7: case X86::YMM15: case X86::MM7:
    return 7;

  case X86::ES: return 0;
  case X86::CS: return 1;
  case X86::SS: return 2;
  case X86::DS: return 3;
  case X86::FS: return 4;
  case X86::GS: return 5;

  case X86::CR0: case X86::CR8 : case X86::DR0: return 0;
  case X86::CR1: case X86::CR9 : case X86::DR1: return 1;
  case X86::CR2: case X86::CR10: case X86::DR2: return 2;
  case X86::CR3: case X86::CR11: case X86::DR3: return 3;
  case X86::CR4: case X86::CR12: case X86::DR4: return 4;
  case X86::CR5: case X86::CR13: case X86::DR5: return 5;
  case X86::CR6: case X86::CR14: case X86::DR6: return 6;
  case X86::CR7: case X86::CR15: case X86::DR7: return 7;

  // Pseudo index registers are equivalent to a "none"
  // scaled index (See Intel Manual 2A, table 2-3)
  case X86::EIZ:
  case X86::RIZ:
    return 4;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}
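/// getMatchingSuperRegClass - Return a subclass of A (possibly A itself)
/// whose registers all have a SubIdx sub-register contained in class B, or
/// null if no such class exists.  (This follows the contract documented for
/// the base method in TargetRegisterInfo.)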
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case X86::sub_8bit:
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_8bit_hi:
    if (B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_16bit:
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    }
    break;
  case X86::sub_32bit:
    if (B == &X86::GR32RegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_NOSPRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOSPRegClass)
        return &X86::GR64_NOSPRegClass;
      if (A->getSize() == 8)
        return getCommonSubClass(A, &X86::GR64_NOSPRegClass);
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  case X86::sub_ss:
    if (B == &X86::FR32RegClass)
      return A;
    break;
  case X86::sub_sd:
    if (B == &X86::FR64RegClass)
      return A;
    break;
  case X86::sub_xmm:
    if (B == &X86::VR128RegClass)
      return A;
    break;
  }
  return 0;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return NULL;
}
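// Note: each callee-saved register list below is a zero-terminated array, as
// callers of getCalleeSavedRegs expect; the GHC calling convention preserves
// nothing, so its list consists of the terminator alone.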
const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  static const unsigned GhcCalleeSavedRegs[] = {
    0
  };

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX, X86::RBP, X86::RDI, X86::RSI,
    X86::R12, X86::R13, X86::R14, X86::R15,
    X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (ghcCall) {
    return GhcCalleeSavedRegs;
  } else if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);
  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
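// Worked example (illustrative, not from the original source): with the
// default 16-byte StackAlign, a function holding a 32-byte-aligned local
// (say, a spill slot for a 256-bit vector) has MFI->getMaxAlignment() == 32,
// so requiresRealignment is true in needsStackRealignment() below, and the
// stack is realigned provided canRealignStack() sees no variable-sized
// objects.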
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                              F->hasFnAttr(Attribute::StackAlignment));

  // FIXME: Currently we don't support stack realignment for functions with
  // variable-sized allocas.
  // FIXME: It's more complicated than this...
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    report_fatal_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  // If we've requested that we force-align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}
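// Illustrative example for the opcode selectors above: getSUBriOpcode(true, 8)
// returns X86::SUB64ri8 because 8 fits in a signed 8-bit immediate, while
// getSUBriOpcode(true, 400) returns X86::SUB64ri32; the imm8 form encodes the
// immediate in one byte instead of four.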
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  bool reserveCallFrame = TFI->hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into an 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly.  To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

    MachineInstr *New = 0;
    if (Opcode == getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(Is64Bit, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(Is64Bit, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

  if (Opcode == getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back.  We do this until we have
    // more advanced stack pointer tracking ability.
    unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();
    MBB.insert(I, New);
  }
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a multi-operand memory reference.  Replace the
  // FrameIndex with the chosen base register (EBP/RBP or ESP/RSP), and add
  // the frame object's offset to the displacement that follows it.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from the base pointer.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = FIOffset + (int)(MI.getOperand(i+3).getImm());
    MI.getOperand(i+3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP  // Should have dwarf #16.
                 : X86::EIP; // Should have dwarf #8.
}
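// Note (an assumption about usage, not stated in this file): frame-relative
// locations in debug info and EH tables are described against the register
// returned by getFrameRegister() below, so it must agree with whether this
// function actually maintains a frame pointer (TFI->hasFP).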
unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}
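// getX86SubSuperRegister returns the sub- or super-register of Reg at the
// requested width.  For example, getX86SubSuperRegister(X86::EAX, MVT::i16)
// yields X86::AX, and getX86SubSuperRegister(X86::RCX, MVT::i8, /*High=*/true)
// yields X86::CH.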
namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"
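// The pass below conservatively forces the frame pointer to be reserved
// whenever any virtual register's class requires more alignment than the
// stack guarantees: spilling such a register could require dynamic stack
// realignment, which in turn needs a frame pointer.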
namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const X86RegisterInfo *X86RI = TM->getRegisterInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = X86RI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that a vector
      // register will be spilled and thus require dynamic stack realignment.
      for (unsigned i = 0, e = RI.getNumVirtRegs(); i != e; ++i) {
        unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
        if (RI.getRegClass(Reg)->getAlignment() > StackAlignment) {
          FuncInfo->setReserveFP(true);
          return true;
        }
      }
      // Nothing to do.
      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAH::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }