X86RegisterInfo.cpp revision bc57c6db4a3a1f5df4450d8dbb100e1eb6944c28
//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Not supported yet; just fall back to the generic numbering.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}

/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
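/// For example, X86::RAX, X86::EAX, X86::AX, and X86::AL all map to the same
/// encoding (N86::EAX, i.e. 0), since the 3-bit register field in ModR/M
/// cannot distinguish operand sizes.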
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8:
  case X86::YMM0: case X86::YMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9:
  case X86::YMM1: case X86::YMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10:
  case X86::YMM2: case X86::YMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11:
  case X86::YMM3: case X86::YMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12:
  case X86::YMM4: case X86::YMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13:
  case X86::YMM5: case X86::YMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14:
  case X86::YMM6: case X86::YMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15:
  case X86::YMM7: case X86::YMM15: case X86::MM7:
    return 7;

  case X86::ES: return 0;
  case X86::CS: return 1;
  case X86::SS: return 2;
  case X86::DS: return 3;
  case X86::FS: return 4;
  case X86::GS: return 5;

  case X86::CR0: case X86::CR8 : case X86::DR0: return 0;
  case X86::CR1: case X86::CR9 : case X86::DR1: return 1;
  case X86::CR2: case X86::CR10: case X86::DR2: return 2;
  case X86::CR3: case X86::CR11: case X86::DR3: return 3;
  case X86::CR4: case X86::CR12: case X86::DR4: return 4;
  case X86::CR5: case X86::CR13: case X86::DR5: return 5;
  case X86::CR6: case X86::CR14: case X86::DR6: return 6;
  case X86::CR7: case X86::CR15: case X86::DR7: return 7;

  // Pseudo index registers are equivalent to a "none"
  // scaled index (See Intel Manual 2A, table 2-3)
  case X86::EIZ:
  case X86::RIZ:
    return 4;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}
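
/// getMatchingSuperRegClass - Return a register class of A such that every
/// register in it has a SubIdx sub-register that lies in class B, or null if
/// no suitable class exists.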
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case X86::sub_8bit:
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_8bit_hi:
    if (B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_16bit:
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    }
    break;
  case X86::sub_32bit:
    if (B == &X86::GR32RegClass || B == &X86::GR32_NOSPRegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  case X86::sub_ss:
    if (B == &X86::FR32RegClass)
      return A;
    break;
  case X86::sub_sd:
    if (B == &X86::FR64RegClass)
      return A;
    break;
  case X86::sub_xmm:
    if (B == &X86::VR128RegClass)
      return A;
    break;
  }
  return 0;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return NULL;
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  static const unsigned GhcCalleeSavedRegs[] = {
    0
  };

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX,   X86::RBP,   X86::RDI,   X86::RSI,
    X86::R12,   X86::R13,   X86::R14,   X86::R15,
    X86::XMM6,  X86::XMM7,  X86::XMM8,  X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (ghcCall) {
    return GhcCalleeSavedRegs;
  } else if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
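  // (The instruction pointer may appear as an operand, e.g. RIP as the base
  // of RIP-relative addresses, but it can never be allocated.)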
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);
  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();

  return (DisableFramePointerElim(MF) ||
          needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit());
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                              F->hasFnAttr(Attribute::StackAlignment));

  // FIXME: Currently we don't support stack realignment for functions with
  //        variable-sized allocas.
  // FIXME: It's more complicated than this...
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    report_fatal_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  // If we've requested that we force align the stack do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  if (Reg == FramePtr && hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

int
X86RegisterInfo::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
  const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - TFI.getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
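      // (With realignment, fixed objects (FI < 0) are addressed off the frame
      // pointer, so step over the slot holding the pushed EBP/RBP.)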
      Offset += SlotSize;
    } else {
      unsigned Align = MFI->getObjectAlignment(FI);
      assert((-(Offset + StackSize)) % Align == 0);
      Align = 0;
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}

static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

      MachineInstr *New = 0;
      if (Old->getOpcode() == getCallFrameSetupOpcode()) {
        New = BuildMI(MF, Old->getDebugLoc(),
                      TII.get(getSUBriOpcode(Is64Bit, Amount)),
                      StackPtr)
          .addReg(StackPtr)
          .addImm(Amount);
      } else {
        assert(Old->getOpcode() == getCallFrameDestroyOpcode());

        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;

        if (Amount) {
          unsigned Opc = getADDriOpcode(Is64Bit, Amount);
          New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
            .addReg(StackPtr)
            .addImm(Amount);
        }
      }

      if (New) {
        // The EFLAGS implicit def is dead.
        New->getOperand(3).setIsDead();

        // Replace the pseudo instruction with a new instruction.
        MBB.insert(I, New);
      }
    }
  } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
      MachineInstr *Old = I;
      MachineInstr *New =
        BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
                StackPtr)
          .addReg(StackPtr)
          .addImm(CalleeAmt);

      // The EFLAGS implicit def is dead.
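      // (Operand 3 is the implicit EFLAGS definition appended by BuildMI from
      // the SUB/ADD instruction description.)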
      New->getOperand(3).setIsDead();
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, RegScavenger *RS) const{
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register, then fold the frame object offset
  // into the displacement.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI.getOffsetOfLocalArea();
  } else
    FIOffset = getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = FIOffset + (int)(MI.getOperand(i + 3).getImm());
    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}

void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // create RETURNADDR area
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           (-1U*SlotSize)+TailCallReturnAddrDelta, true);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    FrameIdx = 0;
  }
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub ?
    getSUBriOpcode(Is64Bit, Offset) :
    getADDriOpcode(Is64Bit, Offset);
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    Offset -= ThisVal;
  }
}

/// mergeSPUpdatesUp - Merge a stack adjustment immediately above the given
/// iterator into *NumBytes and erase it.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

/// mergeSPUpdatesDown - Merge a stack adjustment immediately below the given
/// iterator into *NumBytes and erase it.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // FIXME: THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = llvm::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Check the instruction before/after the passed
/// instruction. If it is an ADD/SUB of the stack pointer, it is deleted and
/// the stack adjustment is returned as a positive value for ADD and a
/// negative one for SUB.
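/// (For example, merging a preceding 'subl $16, %esp' erases that instruction
/// and returns -16.)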
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ?
    0 : llvm::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr){
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}

void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                MCSymbol *Label,
                                                unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  bool HasFP = hasFP(MF);

  // Calculate amount of bytes used for return address storing.
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  // FIXME: This is a dirty hack. The code itself is a mess right now and
  // should be rewritten from scratch and generalized at some point.

  // Determine maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    // another bug. I.e., one where we generate a prolog like this:
    //
    //   pushl  %ebp
    //   movl   %esp, %ebp
    //   pushl  %ebp
    //   pushl  %esi
    //   ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(Label, CSDst, CSSrc));
  }
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86Subtarget *Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() ||
    !Fn->doesNotThrow() || UnwindTablesMandatory;
  uint64_t MaxAlign  = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  DebugLoc DL;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out. Otherwise just make sure we have some alignment - we'll
  // go with the minimum SlotSize.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, and use up to 128 bytes of stack space, don't have a frame
  // pointer, calls, or dynamic alloca then we do not need to adjust the
  // stack pointer (we fit in the Red Zone).
  if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
      !needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
      !MFI->adjustsStack() &&                      // No calls.
      !Subtarget->isTargetWin64()) {               // Win64 has no Red Zone
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  } else if (Subtarget->isTargetWin64()) {
    // We need to always allocate 32 bytes as register spill area.
    // FIXME: We might reuse these 32 bytes for leaf functions.
    StackSize += 32;
    MFI->setStackSize(StackSize);
  }

  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(getSUBriOpcode(Is64Bit, -TailCallReturnAddrDelta)),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  uint64_t NumBytes = 0;
  int stackGrowth = -TD->getPointerSize();

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel);

      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      }

      // Change the rule for the FramePtr to be an "offset" rule.
      MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);

    if (needsFrameMoves) {
      // Mark effective beginning of when frame pointer becomes valid.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel);

      // Define the current CFA to use the EBP/RBP register.
      MachineLocation FPDst(FramePtr);
      MachineLocation FPSrc(MachineLocation::VirtualFP);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);

    // Realign stack
    if (needsStackRealignment(MF)) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL,
                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
                StackPtr).addReg(StackPtr).addImm(-MaxAlign);

      // The EFLAGS implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
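  // (These PUSHes were emitted by the PrologEpilogInserter ahead of this
  // point; when there is no frame pointer, each one needs its own CFA-offset
  // update below.)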
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark callee-saved push instruction.
      MCSymbol *Label = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);

      // Define the current CFA rule to use the provided offset.
      unsigned Ptr = StackSize ?
        MachineLocation::VirtualFP : StackPtr;
      MachineLocation SPDst(Ptr);
      MachineLocation SPSrc(Ptr, StackOffset);
      Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      StackOffset += stackGrowth;
    }
  }

  DL = MBB.findDebugLoc(MBBI);

  // Adjust stack pointer: ESP -= numbytes.

  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
  // uses __alloca. __alloca and the 32-bit version of __chkstk will probe
  // the stack and adjust the stack pointer in one go. The 64-bit version
  // of __chkstk is only responsible for probing the stack. The 64-bit
  // prologue is responsible for adjusting the stack pointer. Touching the
  // stack at 4K increments is necessary to ensure that the guard pages used
  // by the OS virtual memory manager are allocated in correct sequence.
  if (NumBytes >= 4096 &&
      (Subtarget->isTargetCygMing() || Subtarget->isTargetWin32())) {
    // Check whether EAX is live-in for this function.
    bool isEAXAlive = false;
    for (MachineRegisterInfo::livein_iterator
           II = MF.getRegInfo().livein_begin(),
           EE = MF.getRegInfo().livein_end(); (II != EE) && !isEAXAlive; ++II) {
      unsigned Reg = II->first;
      isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                    Reg == X86::AH || Reg == X86::AL);
    }

    const char *StackProbeSymbol =
      Subtarget->isTargetWindows() ? "_chkstk" : "_alloca";
    if (!isEAXAlive) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol(StackProbeSymbol)
        .addReg(StackPtr,    RegState::Define | RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
    } else {
      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill);

      // Allocate NumBytes-4 bytes on stack. We'll also use 4 already
      // allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes - 4);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol(StackProbeSymbol)
        .addReg(StackPtr,    RegState::Define | RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

      // Restore EAX
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    // If there is a SUB32ri of ESP immediately before this instruction, merge
    // the two. This can be the case when tail call elimination is enabled and
    // the callee has more arguments than the caller.
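    // (mergeSPUpdates returns a negative value for an erased SUB, so a
    // preceding 'subl $12, %esp' folds its 12 bytes into this allocation.)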
    NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

    // If there is an ADD32ri or SUB32ri of ESP immediately after this
    // instruction, merge the two instructions.
    mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

    if (NumBytes)
      emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
  }

  if ((NumBytes || PushedRegs) && needsFrameMoves) {
    // Mark end of stack pointer adjustment.
    MCSymbol *Label = MMI.getContext().CreateTempSymbol();
    BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);

    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -StackSize + stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      }
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
  }
}

void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break;  // These are ok
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign  = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out. Otherwise just make sure we have some alignment - we'll
  // go with the minimum.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else
      MaxAlign = MaxAlign ? MaxAlign : 4;
  }

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
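  // (Walk MBBI backwards over the trailing POPs so the stack adjustment below
  // is inserted before them.)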
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;

    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset esp to point to the last callee-saved
  // slot before popping them off! The same applies when the stack was
  // realigned.
  if (needsStackRealignment(MF)) {
    // We cannot use LEA here, because the stack pointer was realigned. We need
    // to deallocate the local frame explicitly.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI =
        addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                     FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else {
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
  }

  // We're returning from function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj-MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }

    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
                                     ? X86::TAILJMPd : X86::TAILJMPd64)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
                                       ? X86::TAILJMPm : X86::TAILJMPm64));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = prior(MBBI);
    for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
      NewMI->addOperand(MBBI->getOperand(i));

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP     // Should have dwarf #16.
                 : X86::EIP;    // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

void
X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) const {
  // Calculate amount of bytes used for return address storing
  int stackGrowth = (Is64Bit ? -8 : -4);

  // Initial state of the frame pointer is esp+stackGrowth.
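  // (At the function entry point the return address has already been pushed,
  // so the CFA sits SlotSize bytes above the current stack pointer.)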
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add return address to move list
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"

namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const X86RegisterInfo *X86RI = TM->getRegisterInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = X86RI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that a vector
      // register will be spilled and thus require dynamic stack realignment.
      for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
           RegNum < RI.getLastVirtReg(); ++RegNum)
        if (RI.getRegClass(RegNum)->getAlignment() > StackAlignment) {
          FuncInfo->setReserveFP(true);
          return true;
        }

      // Nothing to do
      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAH::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }