X86RegisterInfo.cpp revision e566763b1915c7a4821ce95937b763724d271fec
//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Unsupported for now; just a quick fallback.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}

/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
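/// For example, X86::RAX, X86::EAX, X86::AX, and X86::AL all map to the same
/// hardware encoding, 0 (N86::EAX), as the switch below shows.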
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch (RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15: case X86::MM7:
    return 7;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case 1:
    // 8-bit
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::FR32RegClass) {
      return A;
    }
    break;
  case 2:
    // 8-bit hi
    if (B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::FR64RegClass) {
      return A;
    }
    break;
  case 3:
    // 16-bit
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::VR128RegClass) {
      return A;
    }
    break;
  case 4:
    // 32-bit
    if (B == &X86::GR32RegClass || B == &X86::GR32_NOSPRegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  }
  return 0;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return NULL;
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  static const unsigned GhcCalleeSavedRegs[] = {
    0
  };

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX, X86::RBP, X86::RDI, X86::RSI,
    X86::R12, X86::R13, X86::R14, X86::R15,
    X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (ghcCall) {
    return GhcCalleeSavedRegs;
  } else if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}

const TargetRegisterClass* const*
X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  if (MF)
    callsEHReturn = MF->getMMI().callsEHReturn();

  static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64EHRet[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegClassesWin64;
    else
      return (callsEHReturn ?
              CalleeSavedRegClasses64EHRet : CalleeSavedRegClasses64Bit);
  } else {
    return (callsEHReturn ?
            CalleeSavedRegClasses32EHRet : CalleeSavedRegClasses32Bit);
  }
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);
  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
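/// Several other conditions checked in the body below also force a frame
/// pointer: stack realignment, a taken frame address, an explicit request
/// recorded in X86MachineFunctionInfo, and a call to the unwind-init
/// intrinsic.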
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();

  return (DisableFramePointerElim(MF) ||
          needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit());
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  bool requiresRealignment =
    RealignStack && ((MFI->getMaxAlignment() > StackAlign) ||
                     F->hasFnAttr(Attribute::StackAlignment));

  // FIXME: Currently we don't support stack realignment for functions with
  // variable-sized allocas.
  // FIXME: The error is temporarily disabled - it seems to be too conservative.
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    report_fatal_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  return (requiresRealignment && !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

bool X86RegisterInfo::hasReservedSpillSlot(MachineFunction &MF, unsigned Reg,
                                           int &FrameIdx) const {
  if (Reg == FramePtr && hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

int
X86RegisterInfo::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
  const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - TFI.getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      Offset += SlotSize;
    } else {
      unsigned Align = MFI->getObjectAlignment(FI);
      assert((-(Offset + StackSize)) % Align == 0);
      Align = 0;  // Silence an unused-variable warning in release builds.
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area.
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add.
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
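      // (For example, with a 16-byte StackAlign, a 20-byte argument area
      // rounds up to 32 bytes: (20 + 16 - 1) / 16 * 16 == 32.)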
      Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

      MachineInstr *New = 0;
      if (Old->getOpcode() == getCallFrameSetupOpcode()) {
        New = BuildMI(MF, Old->getDebugLoc(),
                      TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
                      StackPtr)
          .addReg(StackPtr)
          .addImm(Amount);
      } else {
        assert(Old->getOpcode() == getCallFrameDestroyOpcode());

        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;

        if (Amount) {
          unsigned Opc = (Amount < 128) ?
            (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
            (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
          New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
            .addReg(StackPtr)
            .addImm(Amount);
        }
      }

      if (New) {
        // The EFLAGS implicit def is dead.
        New->getOperand(3).setIsDead();

        // Replace the pseudo instruction with a new instruction.
        MBB.insert(I, New);
      }
    }
  } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = (CalleeAmt < 128) ?
        (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
        (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
      MachineInstr *Old = I;
      MachineInstr *New =
        BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
                StackPtr)
          .addReg(StackPtr)
          .addImm(CalleeAmt);

      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}

unsigned
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, FrameIndexValue *Value,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else
    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four-operand memory reference. Replace the
  // FrameIndex with the base register (EBP or ESP), then fold the frame
  // object's offset into the displacement.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = getFrameIndexOffset(MF, FrameIndex) +
      (int)(MI.getOperand(i + 3).getImm());

    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
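    // (For instance, the displacement can be a symbolic operand, such as a
    // global address, instead of a plain immediate.)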
    uint64_t Offset = getFrameIndexOffset(MF, FrameIndex) +
      (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
  return 0;
}

void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // Create the RETURNADDR area:
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           (-1U*SlotSize)+TailCallReturnAddrDelta,
                           true, false);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true, false);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    FrameIdx = 0;
  }
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub
    ? ((Offset < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
    : ((Offset < 128) ?
       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    Offset -= ThisVal;
  }
}

/// mergeSPUpdatesUp - Merge a stack-pointer update immediately above the
/// given iterator into the running byte count, and erase it.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

/// mergeSPUpdatesDown - Merge a stack-pointer update immediately below the
/// given iterator into the running byte count, and erase it.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // FIXME: THIS ISN'T RUN!!!
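  // The unconditional return below disables this merge; everything after it
  // is currently dead code.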
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = llvm::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Check the instruction before/after the passed
/// instruction. If it is an ADD/SUB of the stack pointer, it is deleted and
/// the stack adjustment is returned as a positive value for ADD and a
/// negative one for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}

void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                MCSymbol *Label,
                                                unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  bool HasFP = hasFP(MF);

  // Calculate the number of bytes used for storing the return address.
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  // FIXME: This is a dirty hack. The code itself is a mess right now.
  // It should be rewritten from scratch and generalized someday.

  // Determine the maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    // another bug. I.e., one where we generate a prolog like this:
    //
    //     pushl  %ebp
    //     movl   %esp, %ebp
    //     pushl  %ebp
    //     pushl  %esi
    //     ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(Label, CSDst, CSSrc));
  }
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86Subtarget *Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() ||
    !Fn->doesNotThrow() || UnwindTablesMandatory;
  uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();   // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  DebugLoc DL;

  // Add the RETADDR move area to the callee-saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // On x86-64, if the Red Zone is not disabled and this is a leaf function
  // that uses no more than 128 bytes of stack space, has no frame pointer,
  // and makes no calls or dynamic allocas, then we do not need to adjust the
  // stack pointer (we fit in the Red Zone).
  if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
      !needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&    // No dynamic alloca.
      !MFI->hasCalls() &&              // No calls.
      !Subtarget->isTargetWin64()) {   // Win64 has no Red Zone.
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  } else if (Subtarget->isTargetWin64()) {
    // We always need to allocate 32 bytes as a register spill area.
    // FIXME: We might be able to reuse these 32 bytes for leaf functions.
    StackSize += 32;
    MFI->setStackSize(StackSize);
  }

  // Insert a stack pointer adjustment for later moving of the return address.
  // This only applies to tail-call-optimized functions where the callee's
  // argument stack size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  uint64_t NumBytes = 0;
  int stackGrowth = -TD->getPointerSize();

  if (HasFP) {
    // Calculate the required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(FrameLabel);

      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      }

      // Change the rule for the FramePtr to be an "offset" rule.
      MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);

    if (needsFrameMoves) {
      // Mark the effective beginning of when the frame pointer becomes valid.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(FrameLabel);

      // Define the current CFA to use the EBP/RBP register.
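      // Per the mapping table above (SRC is VirtualFP, DST is a register),
      // this machine move becomes a DW_CFA_def_cfa_register rule.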
      MachineLocation FPDst(FramePtr);
      MachineLocation FPSrc(MachineLocation::VirtualFP);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);

    // Realign the stack.
    if (needsStackRealignment(MF)) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL,
                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
                StackPtr).addReg(StackPtr).addImm(-MaxAlign);

      // The EFLAGS implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark the callee-saved push instruction.
      MCSymbol *Label = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(Label);

      // Define the current CFA rule to use the provided offset.
      unsigned Ptr = StackSize ?
        MachineLocation::VirtualFP : StackPtr;
      MachineLocation SPDst(Ptr);
      MachineLocation SPSrc(Ptr, StackOffset);
      Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      StackOffset += stackGrowth;
    }
  }

  DL = MBB.findDebugLoc(MBBI);

  // Adjust the stack pointer: ESP -= NumBytes.
  if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
    // Check whether EAX is live-in for this function.
    bool isEAXAlive = false;
    for (MachineRegisterInfo::livein_iterator
           II = MF.getRegInfo().livein_begin(),
           EE = MF.getRegInfo().livein_end(); (II != EE) && !isEAXAlive; ++II) {
      unsigned Reg = II->first;
      isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                    Reg == X86::AH || Reg == X86::AL);
    }

    // The function prologue calls _alloca to probe the stack when allocating
    // more than 4K bytes in one go. Touching the stack at 4K increments is
    // necessary to ensure that the guard pages used by the OS virtual memory
    // manager are allocated in the correct sequence.
    if (!isEAXAlive) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca")
        .addReg(StackPtr, RegState::Define | RegState::Implicit);
    } else {
      // Save EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill);

      // Allocate NumBytes-4 bytes on the stack. We'll also use the 4 already
      // allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes - 4);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca")
        .addReg(StackPtr, RegState::Define | RegState::Implicit);

      // Restore EAX.
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    // If there is a SUB32ri of ESP immediately before this instruction, merge
    // the two. This can be the case when tail call elimination is enabled and
    // the callee has more arguments than the caller.
    NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

    // If there is an ADD32ri or SUB32ri of ESP immediately after this
    // instruction, merge the two instructions.
    mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

    if (NumBytes)
      emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
  }

  if ((NumBytes || PushedRegs) && needsFrameMoves) {
    // Mark the end of the stack pointer adjustment.
    MCSymbol *Label = MMI.getContext().CreateTempSymbol();
    BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(Label);

    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -StackSize + stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      }
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
  }
}

void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break;  // These are ok.
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (hasFP(MF)) {
    // Calculate the required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;

    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
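  // (For example, an 'addl $N, %esp' sitting just above the callee-saved
  // pops is erased here and its N bytes are folded into NumBytes.)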
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset ESP to point to the last
  // callee-saved slot before popping them off! The same applies when the
  // stack was realigned.
  if (needsStackRealignment(MF)) {
    // We cannot use LEA here, because the stack pointer was realigned. We
    // need to deallocate the local frame first.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI =
        addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                        FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else {
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust the stack pointer back: ESP += NumBytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
  }

  // We're returning from the function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;

    // Tail call return: adjust the stack pointer and jump to the callee.
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust the stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj - MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for a possible merge with the preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }

    // Jump to the label or to the value in a register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
                                     ? X86::TAILJMPd : X86::TAILJMPd64)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
                                       ? X86::TAILJMPm : X86::TAILJMPm64));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());
    }

    MachineInstr *NewMI = prior(MBBI);
    for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
      NewMI->addOperand(MBBI->getOperand(i));

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());

    // Check for a possible merge with the preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP   // Should have dwarf #16.
                 : X86::EIP;  // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

void
X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) const {
  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = (Is64Bit ? -8 : -4);

  // The initial state of the frame pointer is esp + stackGrowth.
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add the return address to the move list.
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"

namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(&ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const X86RegisterInfo *X86RI = TM->getRegisterInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = X86RI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that a vector
      // register will be spilled and thus require dynamic stack realignment.
      for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
           RegNum < RI.getLastVirtReg(); ++RegNum)
        if (RI.getRegClass(RegNum)->getAlignment() > StackAlignment) {
          FuncInfo->setReserveFP(true);
          return true;
        }

      // Nothing to do.
      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAH::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }