X86RegisterInfo.cpp revision 2fa82bc3da45d272f12a96a61074b637faa62e0b
//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                 cl::desc("Force align the stack to the minimum alignment"
                          " needed for the function."),
                 cl::init(false), cl::Hidden);

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

static unsigned getFlavour(const X86Subtarget *Subtarget, bool isEH) {
  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        return DWARFFlavour::X86_32_DarwinEH;
      else
        return DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Unsupported by now, just quick fallback
      return DWARFFlavour::X86_32_Generic;
    } else {
      return DWARFFlavour::X86_32_Generic;
    }
  }
  return DWARFFlavour::X86_64;
}

/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = getFlavour(Subtarget, isEH);

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}

/// getLLVMRegNum - This function maps DWARF register numbers to LLVM register.
int X86RegisterInfo::getLLVMRegNum(unsigned DwarfRegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = getFlavour(Subtarget, isEH);

  return X86GenRegisterInfo::getLLVMRegNumFull(DwarfRegNo, Flavour);
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  int reg = getX86RegNum(i);
  switch (i) {
  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
  case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
  case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
  case X86::YMM8: case X86::YMM9: case X86::YMM10: case X86::YMM11:
  case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
    reg += 8;
  }
  return reg;
}

/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8:
  case X86::YMM0: case X86::YMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9:
  case X86::YMM1: case X86::YMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10:
  case X86::YMM2: case X86::YMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11:
  case X86::YMM3: case X86::YMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12:
  case X86::YMM4: case X86::YMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13:
  case X86::YMM5: case X86::YMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14:
  case X86::YMM6: case X86::YMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15:
  case X86::YMM7: case X86::YMM15: case X86::MM7:
    return 7;

  case X86::ES: return 0;
  case X86::CS: return 1;
  case X86::SS: return 2;
  case X86::DS: return 3;
  case X86::FS: return 4;
  case X86::GS: return 5;

  case X86::CR0: case X86::CR8 : case X86::DR0: return 0;
  case X86::CR1: case X86::CR9 : case X86::DR1: return 1;
  case X86::CR2: case X86::CR10: case X86::DR2: return 2;
  case X86::CR3: case X86::CR11: case X86::DR3: return 3;
  case X86::CR4: case X86::CR12: case X86::DR4: return 4;
  case X86::CR5: case X86::CR13: case X86::DR5: return 5;
  case X86::CR6: case X86::CR14: case X86::DR6: return 6;
  case X86::CR7: case X86::CR15: case X86::DR7: return 7;

  // Pseudo index registers are equivalent to a "none"
  // scaled index (See Intel Manual 2A, table 2-3)
  case X86::EIZ:
  case X86::RIZ:
    return 4;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case X86::sub_8bit:
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_8bit_hi:
    if (B->hasSubClassEq(&X86::GR8_ABCD_HRegClass))
      switch (A->getSize()) {
      case 2: return getCommonSubClass(A, &X86::GR16_ABCDRegClass);
      case 4: return getCommonSubClass(A, &X86::GR32_ABCDRegClass);
      case 8: return getCommonSubClass(A, &X86::GR64_ABCDRegClass);
      default: return 0;
      }
    break;
  case X86::sub_16bit:
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  case X86::sub_32bit:
    if (B == &X86::GR32RegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_NOSPRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOSPRegClass)
        return &X86::GR64_NOSPRegClass;
      if (A->getSize() == 8)
        return getCommonSubClass(A, &X86::GR64_NOSPRegClass);
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREX_NOSPRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREX_NOSPRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREX_NOSPRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  case X86::sub_ss:
    if (B == &X86::FR32RegClass)
      return A;
    break;
  case X86::sub_sd:
    if (B == &X86::FR64RegClass)
      return A;
    break;
  case X86::sub_xmm:
    if (B == &X86::VR128RegClass)
      return A;
    break;
  }
  return 0;
}

const TargetRegisterClass*
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const{
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->superclasses_begin();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (TM.getSubtarget<X86Subtarget>().isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_TCRegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  static const unsigned GhcCalleeSavedRegs[] = {
    0
  };

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX, X86::RBP, X86::RDI, X86::RSI,
    X86::R12, X86::R13, X86::R14, X86::R15,
    X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (ghcCall) {
    return GhcCalleeSavedRegs;
  } else if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though their
    // super-registers are old 32-bits.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      const unsigned GPR64[] = {
        X86::R8,  X86::R9,  X86::R10, X86::R11,
        X86::R12, X86::R13, X86::R14, X86::R15
      };
      for (const unsigned *AI = getOverlaps(GPR64[n]); unsigned Reg = *AI; ++AI)
        Reserved.set(Reg);

      // XMM8, XMM9, ...
      assert(X86::XMM15 == X86::XMM8+7);
      for (const unsigned *AI = getOverlaps(X86::XMM8 + n); unsigned Reg = *AI;
           ++AI)
        Reserved.set(Reg);
    }
  }

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                               F->hasFnAttr(Attribute::StackAlignment));

  // FIXME: Currently we don't support stack realignment for functions with
  //        variable-sized allocas.
  // FIXME: It's more complicated than this...
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    report_fatal_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  // If we've requested that we force align the stack do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  bool reserveCallFrame = TFI->hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'
    // TODO: consider using push / pop instead of sub + store / add
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly.  To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

    MachineInstr *New = 0;
    if (Opcode == getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(Is64Bit, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(Is64Bit, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

  if (Opcode == getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back.  We do this until we have
    // more advanced stack pointer tracking ability.
    unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();
    MBB.insert(I, New);
  }
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, RegScavenger *RS) const{
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference.  Replace the
  // FrameIndex with base register with EBP.  Add an offset to the offset.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = FIOffset + (int)(MI.getOperand(i + 3).getImm());
    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP     // Should have dwarf #16.
                 : X86::EIP;    // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"

namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const TargetFrameLowering *TFI = TM->getFrameLowering();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = TFI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that vector register
      // will be spilled and thus require dynamic stack realignment.
      for (unsigned i = 0, e = RI.getNumVirtRegs(); i != e; ++i) {
        unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
        if (RI.getRegClass(Reg)->getAlignment() > StackAlignment) {
          FuncInfo->setReserveFP(true);
          return true;
        }
      }
      // Nothing to do
      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAH::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }