X86RegisterInfo.cpp revision e4c64454051962ab56187c966e981043ff17ae4f
//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameLowering()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Not supported yet; just fall back to the generic numbering.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}
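// Illustrative sketch (comment only, not part of the build): how a debug-info
// or EH emitter might use getDwarfRegNum. The caller and TRI reference are
// hypothetical; only the function and register names come from this file.
//
//   const X86RegisterInfo &TRI = ...;
//   // Translate the frame register into the DWARF EH numbering for .eh_frame.
//   int DwarfReg = TRI.getDwarfRegNum(X86::EBP, /*isEH=*/true);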
/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8:
  case X86::YMM0: case X86::YMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9:
  case X86::YMM1: case X86::YMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10:
  case X86::YMM2: case X86::YMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11:
  case X86::YMM3: case X86::YMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12:
  case X86::YMM4: case X86::YMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13:
  case X86::YMM5: case X86::YMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14:
  case X86::YMM6: case X86::YMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15:
  case X86::YMM7: case X86::YMM15: case X86::MM7:
    return 7;

  case X86::ES: return 0;
  case X86::CS: return 1;
  case X86::SS: return 2;
  case X86::DS: return 3;
  case X86::FS: return 4;
  case X86::GS: return 5;

  case X86::CR0: case X86::CR8:  case X86::DR0: return 0;
  case X86::CR1: case X86::CR9:  case X86::DR1: return 1;
  case X86::CR2: case X86::CR10: case X86::DR2: return 2;
  case X86::CR3: case X86::CR11: case X86::DR3: return 3;
  case X86::CR4: case X86::CR12: case X86::DR4: return 4;
  case X86::CR5: case X86::CR13: case X86::DR5: return 5;
  case X86::CR6: case X86::CR14: case X86::DR6: return 6;
  case X86::CR7: case X86::CR15: case X86::DR7: return 7;

  // Pseudo index registers are equivalent to a "none"
  // scaled index (See Intel Manual 2A, table 2-3).
  case X86::EIZ:
  case X86::RIZ:
    return 4;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}
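// Illustrative sketch (comment only): the number returned above is the 3-bit
// register field used in ModRM/SIB encoding, which is why REX-extended
// registers alias their low counterparts (R10 shares EDX's field, etc.). The
// emitter variables below are hypothetical:
//
//   unsigned RegField = X86RegisterInfo::getX86RegNum(X86::R10); // N86::EDX == 2
//   uint8_t ModRM = (3 << 6) | (RegField << 3) | RMField; // register-direct form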
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case X86::sub_8bit:
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_8bit_hi:
    if (B == &X86::GR8_ABCD_HRegClass ||
        B->hasSubClass(&X86::GR8_ABCD_HRegClass))
      switch (A->getSize()) {
      case 2: return getCommonSubClass(A, &X86::GR16_ABCDRegClass);
      case 4: return getCommonSubClass(A, &X86::GR32_ABCDRegClass);
      case 8: return getCommonSubClass(A, &X86::GR64_ABCDRegClass);
      default: return 0;
      }
    break;
  case X86::sub_16bit:
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    }
    break;
  case X86::sub_32bit:
    if (B == &X86::GR32RegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_NOSPRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOSPRegClass)
        return &X86::GR64_NOSPRegClass;
      if (A->getSize() == 8)
        return getCommonSubClass(A, &X86::GR64_NOSPRegClass);
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  case X86::sub_ss:
    if (B == &X86::FR32RegClass)
      return A;
    break;
  case X86::sub_sd:
    if (B == &X86::FR64RegClass)
      return A;
    break;
  case X86::sub_xmm:
    if (B == &X86::VR128RegClass)
      return A;
    break;
  }
  return 0;
}
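// Illustrative sketch (comment only): a coalescer-style query against the
// function above. The register classes named here exist in this backend; the
// calling code is hypothetical.
//
//   // "Which superclass of GR32 guarantees its sub_8bit lives in GR8_ABCD_L?"
//   const TargetRegisterClass *RC =
//     TRI.getMatchingSuperRegClass(&X86::GR32RegClass, &X86::GR8_ABCD_LRegClass,
//                                  X86::sub_8bit);  // == &X86::GR32_ABCDRegClass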
const TargetRegisterClass*
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->superclasses_begin();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (TM.getSubtarget<X86Subtarget>().isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_TCRegClass;
    return &X86::GR32_TCRegClass;
  }
}
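// Illustrative sketch (comment only): Kind selects progressively more
// constrained pointer classes, normally via TableGen'd pointer operand
// definitions rather than direct calls. A hypothetical direct query:
//
//   const TargetRegisterClass *PtrRC = TRI.getPointerRegClass(/*Kind=*/1);
//   // On a 64-bit subtarget: GR64_NOSP, since RSP can't encode an index reg.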
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  static const unsigned GhcCalleeSavedRegs[] = {
    0
  };

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX,   X86::RBP,   X86::RDI,   X86::RSI,
    X86::R12,   X86::R13,   X86::R14,   X86::R15,
    X86::XMM6,  X86::XMM7,  X86::XMM8,  X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (ghcCall) {
    return GhcCalleeSavedRegs;
  } else if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}
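// Illustrative sketch (comment only): each array above is terminated by a 0
// sentinel, so clients walk the returned list like this (hypothetical caller):
//
//   for (const unsigned *CSR = TRI.getCalleeSavedRegs(&MF); *CSR; ++CSR)
//     SavedRegs.insert(*CSR);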
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                              F->hasFnAttr(Attribute::StackAlignment));

  // FIXME: Currently we don't support stack realignment for functions with
  //        variable-sized allocas.
  // FIXME: It's more complicated than this...
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    report_fatal_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  // If we've requested that we force-align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}
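// Illustrative sketch (comment only): the helpers above pick the shortest
// immediate encoding for the stack adjustment. For example:
//
//   getADDriOpcode(true, 8);    // X86::ADD64ri8  (imm fits in a signed byte)
//   getADDriOpcode(true, 4096); // X86::ADD64ri32 (needs the 32-bit form)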
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  bool reserveCallFrame = TFI->hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add.
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    // E.g. with Amount = 20 and StackAlign = 16: (20 + 15) / 16 * 16 == 32.
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

    MachineInstr *New = 0;
    if (Opcode == getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(Is64Bit, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(Is64Bit, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

  if (Opcode == getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();
    MBB.insert(I, New);
  }
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Find the operand holding the frame index.
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a memory reference: replace the FrameIndex operand
  // with the chosen base register, then fold the frame object's offset into
  // the displacement below.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from the base register.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = FIOffset + (int)(MI.getOperand(i + 3).getImm());
    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}
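// Illustrative sketch (comment only): a frame-index load before and after
// eliminateFrameIndex, assuming a frame-pointer-based frame where the slot
// lands at [EBP-8]. The frame index and offset are hypothetical:
//
//   before: %reg = MOV32rm <fi#1>, 1, %noreg,  0, %noreg
//   after:  %reg = MOV32rm %EBP,   1, %noreg, -8, %noreg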
unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP     // Should have dwarf #16.
                 : X86::EIP;    // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}
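// Illustrative sketch (comment only): debug-info emission typically combines
// the two queries above to describe frame locations (hypothetical caller):
//
//   unsigned FrameReg = TRI.getFrameRegister(MF);     // EBP/RBP or ESP/RSP
//   int DwarfFrameReg = TRI.getDwarfRegNum(FrameReg, /*isEH=*/true);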
unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}
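// Illustrative sketch (comment only): getX86SubSuperRegister maps any member
// of a GPR family to the member with the requested width, for example:
//
//   getX86SubSuperRegister(X86::EAX, MVT::i64);               // X86::RAX
//   getX86SubSuperRegister(X86::RAX, MVT::i8, /*High=*/true); // X86::AH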
#include "X86GenRegisterInfo.inc"

namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const X86RegisterInfo *X86RI = TM->getRegisterInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = X86RI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that vector
      // registers will be spilled and thus require dynamic stack realignment.
      for (unsigned i = 0, e = RI.getNumVirtRegs(); i != e; ++i) {
        unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
        if (RI.getRegClass(Reg)->getAlignment() > StackAlignment) {
          FuncInfo->setReserveFP(true);
          return true;
        }
      }
      // Nothing to do.
      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAH::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }
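// Illustrative sketch (comment only): the factory above is wired into the
// codegen pipeline by the target; a hypothetical call site in pass setup:
//
//   PM.add(createX86MaxStackAlignmentHeuristicPass());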