X86RegisterInfo.cpp revision d33fa0f9e0c5ecfa5097c359bdfbb2a0f2895889
//===- X86RegisterInfo.cpp - X86 Register Information ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Unsupported for now; fall back to the generic numbering.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}
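// Added commentary (not in the original source): the flavours exist because
// the 32-bit DWARF register numbering was never standardized across systems.
// Most notably, 32-bit Darwin historically swapped the numbers used for ESP
// and EBP in its exception-handling tables relative to the generic
// (SVR4-style) numbering, which is why a separate X86_32_DarwinEH flavour is
// selected for the EH case above.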
/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15: case X86::MM7:
    return 7;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}
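// Added commentary (not in the original source): getX86RegNum returns only
// the low three bits of the hardware register number, which is all that fits
// in a ModRM/SIB field. The extended registers therefore alias the first
// eight -- e.g. getX86RegNum(X86::RAX) and getX86RegNum(X86::R8) are both
// 0 -- and the fourth bit is supplied separately by the REX.B/X/R prefix
// bits during instruction encoding.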
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case 1:
    // 8-bit
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::FR32RegClass) {
      return A;
    }
    break;
  case 2:
    // 8-bit hi
    if (B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::FR64RegClass) {
      return A;
    }
    break;
  case 3:
    // 16-bit
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::VR128RegClass) {
      return A;
    }
    break;
  case 4:
    // 32-bit
    if (B == &X86::GR32RegClass || B == &X86::GR32_NOSPRegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  }
  return 0;
}
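// Added commentary (not in the original source): this hook answers "given
// that some register in class B is the SubIdx sub-register of a register in
// class A, what class must the super-register actually come from?". For
// example, with SubIdx == 1 (the low byte), A == GR32 and B == GR8_ABCD_L
// narrows the result to GR32_ABCD, since only EAX/EBX/ECX/EDX have a
// directly addressable low-byte sub-register in 32-bit mode.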
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return NULL;
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  static const unsigned GhcCalleeSavedRegs[] = {
    0
  };

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX, X86::RBP, X86::RDI, X86::RSI,
    X86::R12, X86::R13, X86::R14, X86::R15,
    X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (ghcCall) {
    return GhcCalleeSavedRegs;
  } else if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}
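// Added commentary (not in the original source): GhcCalleeSavedRegs is just
// the null terminator, i.e. the GHC calling convention preserves no
// registers at all. Code compiled for that convention is built around tail
// calls and never returns normally, so there is nothing for a callee to
// restore.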
const TargetRegisterClass* const*
X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  if (MF)
    callsEHReturn = MF->getMMI().callsEHReturn();

  static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64EHRet[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegClassesWin64;
    else
      return (callsEHReturn ?
              CalleeSavedRegClasses64EHRet : CalleeSavedRegClasses64Bit);
  } else {
    return (callsEHReturn ?
            CalleeSavedRegClasses32EHRet : CalleeSavedRegClasses32Bit);
  }
}
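// Added commentary (not in the original source): each class array above must
// stay parallel to the corresponding register array in getCalleeSavedRegs --
// one entry per saved register, in the same order -- because callers walk
// the two lists in lockstep.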
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);
  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();

  return (DisableFramePointerElim(MF) ||
          needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit());
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  bool requiresRealignment =
    RealignStack && ((MFI->getMaxAlignment() > StackAlign) ||
                     F->hasFnAttr(Attribute::StackAlignment));

  // FIXME: Currently we don't support stack realignment for functions with
  // variable-sized allocas.
  // FIXME: Temporarily disable the error - it seems to be too conservative.
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    report_fatal_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  return (requiresRealignment && !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

bool X86RegisterInfo::hasReservedSpillSlot(MachineFunction &MF, unsigned Reg,
                                           int &FrameIdx) const {
  if (Reg == FramePtr && hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

int
X86RegisterInfo::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
  const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - TFI.getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      Offset += SlotSize;
    } else {
      unsigned Align = MFI->getObjectAlignment(FI);
      assert((-(Offset + StackSize)) % Align == 0);
      Align = 0;
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}
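// Added commentary (not in the original source): the pseudo instructions
// handled below bracket every call sequence. With no reserved call frame, a
// sequence like
//
//   ADJCALLSTACKDOWN32 20
//   ... the call ...
//   ADJCALLSTACKUP32 20, 0
//
// becomes roughly 'subl $N, %esp' / 'addl $N, %esp', where N is 20 rounded
// up to the stack alignment. With a reserved call frame, the space is folded
// into the prologue/epilogue and only callee-popped bytes are compensated.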
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into an 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

      MachineInstr *New = 0;
      if (Old->getOpcode() == getCallFrameSetupOpcode()) {
        New = BuildMI(MF, Old->getDebugLoc(),
                      TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
                      StackPtr)
          .addReg(StackPtr)
          .addImm(Amount);
      } else {
        assert(Old->getOpcode() == getCallFrameDestroyOpcode());

        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;

        if (Amount) {
          unsigned Opc = (Amount < 128) ?
            (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
            (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
          New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
            .addReg(StackPtr)
            .addImm(Amount);
        }
      }

      if (New) {
        // The EFLAGS implicit def is dead.
        New->getOperand(3).setIsDead();

        // Replace the pseudo instruction with a new instruction.
        MBB.insert(I, New);
      }
    }
  } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = (CalleeAmt < 128) ?
        (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
        (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
      MachineInstr *Old = I;
      MachineInstr *New =
        BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
                StackPtr)
          .addReg(StackPtr)
          .addImm(CalleeAmt);

      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}
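// Added commentary (not in the original source): eliminateFrameIndex rewrites
// an abstract frame index in a memory operand into a concrete base register
// plus displacement. A hypothetical 32-bit example with a frame pointer
// (segment operand omitted):
//
//   MOV32rm %eax, <fi#0>, 1, %noreg, 0  -->  MOV32rm %eax, %ebp, 1, %noreg, -8
//
// where -8 stands for whatever getFrameIndexOffset computes for fi#0.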
unsigned
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, FrameIndexValue *Value,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register. Add an offset to the offset.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from the base register.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI.getOffsetOfLocalArea();
  } else
    FIOffset = getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = FIOffset + (int)(MI.getOperand(i + 3).getImm());
    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
  return 0;
}

void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // create RETURNADDR area
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           (-1U*SlotSize)+TailCallReturnAddrDelta,
                           true, false);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true, false);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    FrameIdx = 0;
  }
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub
    ? ((Offset < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
    : ((Offset < 128) ?
       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    Offset -= ThisVal;
  }
}

/// mergeSPUpdatesUp - Merge a stack-pointer adjustment immediately above the
/// given iterator into the running byte count.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}
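// Added commentary (not in the original source): mergeSPUpdatesUp lets the
// epilogue fold an existing adjustment into one it is about to emit. If the
// instruction before MBBI is, say, 'addl $12, %esp', it is erased and 12 is
// added to *NumBytes, so a single combined add is emitted instead of two.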
/// mergeSPUpdatesDown - Merge a stack-pointer adjustment immediately below the
/// given iterator into the running byte count.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // FIXME: THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = llvm::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Check the instruction before/after the passed instruction.
/// If it is an ADD/SUB of the stack pointer, it is deleted and the stack
/// adjustment is returned as a positive value for ADD and a negative one
/// for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr){
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
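// Added commentary (not in the original source): emitCalleeSavedFrameMoves
// records one MachineMove per callee-saved spill slot, expressed relative to
// the virtual frame pointer, so the DWARF emitter can later translate them
// into DW_CFA_offset-style rules in the frame-unwind tables.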
void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                MCSymbol *Label,
                                                unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  bool HasFP = hasFP(MF);

  // Calculate the number of bytes used for storing the return address.
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  // FIXME: This is a dirty hack, and the code itself is a mess right now.
  // It should be rewritten from scratch and generalized someday.

  // Determine maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    // another bug. I.e., one where we generate a prolog like this:
    //
    //   pushl  %ebp
    //   movl   %esp, %ebp
    //   pushl  %ebp
    //   pushl  %esi
    //   ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(Label, CSDst, CSSrc));
  }
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86Subtarget *Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() ||
    !Fn->doesNotThrow() || UnwindTablesMandatory;
  uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();   // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  DebugLoc DL;

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64, the Red Zone is not disabled, and this is a leaf
  // function (no calls, no dynamic allocas) that uses at most 128 bytes of
  // stack space, then we do not need to adjust the stack pointer: we fit in
  // the Red Zone.
  if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
      !needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
      !MFI->adjustsStack() &&                      // Doesn't adjust stack.
      !Subtarget->isTargetWin64()) {               // Win64 has no Red Zone.
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  } else if (Subtarget->isTargetWin64()) {
    // We need to always allocate 32 bytes as register spill area.
    // FIXME: We might reuse these 32 bytes for leaf functions.
    StackSize += 32;
    MFI->setStackSize(StackSize);
  }
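  // Worked example (added commentary, not in the original source): a 64-bit
  // leaf that needs 40 bytes of locals, saves no callee-saved registers, and
  // has no frame pointer reaches the red-zone case with StackSize == 40 and
  // MinSize == 0; since 40 <= 128 the max() above leaves StackSize at 0, no
  // 'subq' is emitted, and the locals live in the red zone below %rsp.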
  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  uint64_t NumBytes = 0;
  int stackGrowth = -TD->getPointerSize();

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(FrameLabel);

      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      }

      // Change the rule for the FramePtr to be an "offset" rule.
      MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);
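    // Added commentary (not in the original source): at this point the
    // classic frame link is in place -- 'pushl %ebp; movl %esp, %ebp' (or
    // the 64-bit equivalent) -- so the unwind info emitted next can switch
    // the CFA rule from an SP-relative offset to being based on EBP/RBP.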
    if (needsFrameMoves) {
      // Mark effective beginning of when frame pointer becomes valid.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(FrameLabel);

      // Define the current CFA to use the EBP/RBP register.
      MachineLocation FPDst(FramePtr);
      MachineLocation FPSrc(MachineLocation::VirtualFP);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);

    // Realign stack.
    if (needsStackRealignment(MF)) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL,
                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
                StackPtr).addReg(StackPtr).addImm(-MaxAlign);

      // The EFLAGS implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark callee-saved push instruction.
      MCSymbol *Label = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(Label);

      // Define the current CFA rule to use the provided offset.
      unsigned Ptr = StackSize ?
        MachineLocation::VirtualFP : StackPtr;
      MachineLocation SPDst(Ptr);
      MachineLocation SPSrc(Ptr, StackOffset);
      Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      StackOffset += stackGrowth;
    }
  }

  DL = MBB.findDebugLoc(MBBI);
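  // Added commentary (not in the original source): for frames of 4096 bytes
  // or more on Cygwin/MinGW, the code below emits roughly
  //
  //   movl $NumBytes, %eax
  //   call _alloca
  //
  // rather than a single 'subl'. The probe touches each 4K page in order; a
  // plain subtract of the full amount could jump past the OS guard page and
  // fault instead of growing the stack.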
  // Adjust stack pointer: ESP -= numbytes.
  if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
    // Check whether EAX is live-in for this function.
    bool isEAXAlive = false;
    for (MachineRegisterInfo::livein_iterator
           II = MF.getRegInfo().livein_begin(),
           EE = MF.getRegInfo().livein_end(); (II != EE) && !isEAXAlive; ++II) {
      unsigned Reg = II->first;
      isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                    Reg == X86::AH || Reg == X86::AL);
    }

    // Function prologue calls _alloca to probe the stack when allocating more
    // than 4k bytes in one go. Touching the stack at 4K increments is
    // necessary to ensure that the guard pages used by the OS virtual memory
    // manager are allocated in the correct sequence.
    if (!isEAXAlive) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca")
        .addReg(StackPtr, RegState::Define | RegState::Implicit);
    } else {
      // Save EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill);

      // Allocate NumBytes-4 bytes on stack. We'll also use 4 already
      // allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes - 4);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca")
        .addReg(StackPtr, RegState::Define | RegState::Implicit);

      // Restore EAX.
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    // If there is a SUB32ri of ESP immediately before this instruction, merge
    // the two. This can be the case when tail call elimination is enabled and
    // the callee has more arguments than the caller.
    NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

    // If there is an ADD32ri or SUB32ri of ESP immediately after this
    // instruction, merge the two instructions.
    mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

    if (NumBytes)
      emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
  }

  if ((NumBytes || PushedRegs) && needsFrameMoves) {
    // Mark end of stack pointer adjustment.
    MCSymbol *Label = MMI.getContext().CreateTempSymbol();
    BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(Label);

    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -StackSize + stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      }
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
  }
}
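/// emitEpilogue - Undo the prologue: restore the stack pointer (via the frame
/// pointer when the stack was realigned or dynamic allocas were used), pop
/// EBP/RBP if present, and lower the eh_return and TCRETURN pseudo
/// instructions into their final forms.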
void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break;  // These are ok.
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;

    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic allocas are used, reset ESP to point to the last callee-saved
  // slot before popping them off. The same applies when the stack was
  // realigned.
  if (needsStackRealignment(MF)) {
    // We cannot use LEA here because the stack pointer was realigned; first
    // deallocate the local frame, then restore the stack pointer from the
    // frame pointer.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI =
        addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                        FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else {
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
  }

  // We're returning from function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj-MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }
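    // Added commentary (not in the original source): the branch below picks
    // the concrete tail-jump form. E.g. a 'TCRETURNdi64 @callee, 0' pseudo
    // becomes 'TAILJMPd64 @callee' now that the stack has been adjusted.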
    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
                                     ? X86::TAILJMPd : X86::TAILJMPd64)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
                                       ? X86::TAILJMPm : X86::TAILJMPm64));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());
    }

    MachineInstr *NewMI = prior(MBBI);
    for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
      NewMI->addOperand(MBBI->getOperand(i));

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP   // Should have dwarf #16.
                 : X86::EIP;  // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

void
X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) const {
  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = (Is64Bit ? -8 : -4);

  // Initial state of the frame pointer is esp+stackGrowth.
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add return address to move list.
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}
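// Added commentary (not in the original source): getX86SubSuperRegister walks
// to the sub- or super-register of the requested width within the same
// register family, e.g.:
//
//   getX86SubSuperRegister(X86::EAX, MVT::i8, /*High=*/true) == X86::AH
//   getX86SubSuperRegister(X86::AL,  MVT::i64)               == X86::RAX
//   getX86SubSuperRegister(X86::R8,  MVT::i16)               == X86::R8W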
namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"

namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(&ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const X86RegisterInfo *X86RI = TM->getRegisterInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = X86RI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that a vector
      // register will be spilled and thus require dynamic stack realignment.
      for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
           RegNum < RI.getLastVirtReg(); ++RegNum)
        if (RI.getRegClass(RegNum)->getAlignment() > StackAlignment) {
          FuncInfo->setReserveFP(true);
          return true;
        }

      // Nothing to do.
      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAH::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }