//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Not supported for now; just a quick fallback.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}

/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
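/// For example, X86::RAX, X86::EAX, X86::AX and X86::AL all map to N86::EAX
/// (encoding 0); the REX extension bit that distinguishes R8 from RAX is
/// emitted separately, so X86::R8 maps to the same 3-bit number.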
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8: case X86::R8D: case X86::R8W: case X86::R8B:
    return N86::EAX;
  case X86::R9: case X86::R9D: case X86::R9W: case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8:
  case X86::YMM0: case X86::YMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9:
  case X86::YMM1: case X86::YMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10:
  case X86::YMM2: case X86::YMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11:
  case X86::YMM3: case X86::YMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12:
  case X86::YMM4: case X86::YMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13:
  case X86::YMM5: case X86::YMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14:
  case X86::YMM6: case X86::YMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15:
  case X86::YMM7: case X86::YMM15: case X86::MM7:
    return 7;

  case X86::ES: return 0;
  case X86::CS: return 1;
  case X86::SS: return 2;
  case X86::DS: return 3;
  case X86::FS: return 4;
  case X86::GS: return 5;

  case X86::CR0: case X86::CR8 : case X86::DR0: return 0;
  case X86::CR1: case X86::CR9 : case X86::DR1: return 1;
  case X86::CR2: case X86::CR10: case X86::DR2: return 2;
  case X86::CR3: case X86::CR11: case X86::DR3: return 3;
  case X86::CR4: case X86::CR12: case X86::DR4: return 4;
  case X86::CR5: case X86::CR13: case X86::DR5: return 5;
  case X86::CR6: case X86::CR14: case X86::DR6: return 6;
  case X86::CR7: case X86::CR15: case X86::DR7: return 7;

  // Pseudo index registers are equivalent to a "none"
  // scaled index (See Intel Manual 2A, table 2-3).
  case X86::EIZ:
  case X86::RIZ:
    return 4;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case X86::sub_8bit:
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_8bit_hi:
    if (B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_16bit:
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    }
    break;
  case X86::sub_32bit:
    if (B == &X86::GR32RegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_NOSPRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOSPRegClass)
        return &X86::GR64_NOSPRegClass;
      if (A->getSize() == 8)
        return getCommonSubClass(A, &X86::GR64_NOSPRegClass);
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  case X86::sub_ss:
    if (B == &X86::FR32RegClass)
      return A;
    break;
  case X86::sub_sd:
    if (B == &X86::FR64RegClass)
      return A;
    break;
  case X86::sub_xmm:
    if (B == &X86::VR128RegClass)
      return A;
    break;
  }
  return 0;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return NULL;
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  static const unsigned GhcCalleeSavedRegs[] = {
    0
  };

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX, X86::RBP, X86::RDI, X86::RSI,
    X86::R12, X86::R13, X86::R14, X86::R15,
    X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (ghcCall) {
    return GhcCalleeSavedRegs;
  } else if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);
  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                              F->hasFnAttr(Attribute::StackAlignment));

  // FIXME: Currently we don't support stack realignment for functions with
  //        variable-sized allocas.
  // FIXME: It's more complicated than this...
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    report_fatal_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  // If we've requested that we force align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

int
X86RegisterInfo::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - TFI->getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
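      // (Fixed objects, FI < 0, are still addressed off the frame pointer
      // when realigning, and the pushed EBP occupies one slot between the
      // frame pointer and those objects.)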
      Offset += SlotSize;
    } else {
      unsigned Align = MFI->getObjectAlignment(FI);
      assert((-(Offset + StackSize)) % Align == 0);
      Align = 0;  // 'Align' is only used by the assertion above.
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!TFI->hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}

static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();

  if (!TFI->hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary (e.g. with StackAlign == 16, 20 bytes of arguments
      // round up to 32).
      Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

      MachineInstr *New = 0;
      if (Old->getOpcode() == getCallFrameSetupOpcode()) {
        New = BuildMI(MF, Old->getDebugLoc(),
                      TII.get(getSUBriOpcode(Is64Bit, Amount)),
                      StackPtr)
          .addReg(StackPtr)
          .addImm(Amount);
      } else {
        assert(Old->getOpcode() == getCallFrameDestroyOpcode());

        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;

        if (Amount) {
          unsigned Opc = getADDriOpcode(Is64Bit, Amount);
          New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
            .addReg(StackPtr)
            .addImm(Amount);
        }
      }

      if (New) {
        // The EFLAGS implicit def is dead.
        New->getOperand(3).setIsDead();

        // Replace the pseudo instruction with a new instruction.
        MBB.insert(I, New);
      }
    }
  } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
      MachineInstr *Old = I;
      MachineInstr *New =
        BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
                StackPtr)
          .addReg(StackPtr)
          .addImm(CalleeAmt);

      // The EFLAGS implicit def is dead.
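      // (Operand 3 is the implicit EFLAGS def appended after the explicit
      // StackPtr def, the StackPtr use and the immediate operand.)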
      New->getOperand(3).setIsDead();
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register (EBP or ESP), and add the frame object
  // offset to the existing offset operand.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = FIOffset + (int)(MI.getOperand(i + 3).getImm());
    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}

void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // Create the RETURNADDR area:
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           (-1U*SlotSize)+TailCallReturnAddrDelta, true);
  }

  if (TFI->hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    FrameIdx = 0;  // 'FrameIdx' is only used by the assertion above.
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP   // Should have dwarf #16.
                 : X86::EIP;  // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}
unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"

namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(ID) {}

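    // If any virtual register's class requires more alignment than the
    // target stack alignment provides, a spill of such a register would
    // force dynamic realignment, so reserve the frame pointer up front.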
    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const X86RegisterInfo *X86RI = TM->getRegisterInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = X86RI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that a vector
      // register will be spilled and thus require dynamic stack realignment.
      for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
           RegNum < RI.getLastVirtReg(); ++RegNum)
        if (RI.getRegClass(RegNum)->getAlignment() > StackAlignment) {
          FuncInfo->setReserveFP(true);
          return true;
        }

      // Nothing to do
      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAH::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }