X86RegisterInfo.cpp revision bbe2bbeac834dfe60fe98cfdeff9a9cc94858b7e
//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Not supported yet; fall back to the generic flavour.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}

/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
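/// For example, RAX, EAX, AX, and AL all map to encoding 0, and R8 maps to
/// encoding 0 as well; the REX prefix (emitted elsewhere) is what
/// distinguishes R8 from RAX in the final instruction bytes.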
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15: case X86::MM7:
    return 7;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case 1:
    // 8-bit
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case 2:
    // 8-bit hi
    if (B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case 3:
    // 16-bit
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    }
    break;
  case 4:
    // 32-bit
    if (B == &X86::GR32RegClass || B == &X86::GR32_NOSPRegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  }
  return 0;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
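    // (The stack pointer is excluded most likely because ESP/RSP cannot be
    //  encoded as the index register of a SIB byte: the index value 100
    //  means "no index".)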
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return NULL;
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;

  if (MF) {
    const MachineFrameInfo *MFI = MF->getFrameInfo();
    const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
    callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
  }

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX, X86::RBP, X86::RDI, X86::RSI,
    X86::R12, X86::R13, X86::R14, X86::R15,
    X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}

const TargetRegisterClass* const*
X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
  bool callsEHReturn = false;

  if (MF) {
    const MachineFrameInfo *MFI = MF->getFrameInfo();
    const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
    callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
  }

  static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64EHRet[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegClassesWin64;
    else
      return (callsEHReturn ?
              CalleeSavedRegClasses64EHRet : CalleeSavedRegClasses64Bit);
  } else {
    return (callsEHReturn ?
            CalleeSavedRegClasses32EHRet : CalleeSavedRegClasses32Bit);
  }
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);
  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
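/// For example, a function that calls alloca() with a runtime-dependent size
/// must keep EBP/RBP as a stable base register, because ESP/RSP no longer
/// sits at a fixed offset from the locals once the allocation happens.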
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();

  return (NoFramePointerElim ||
          needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          (MMI && MMI->callsUnwindInit()));
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  bool requiresRealignment =
    RealignStack && (MFI->getMaxAlignment() > StackAlign);

  // FIXME: Currently we don't support stack realignment for functions with
  // variable-sized allocas.
  // FIXME: Temporarily disable the error - it seems to be too conservative.
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    llvm_report_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  return (requiresRealignment && !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

bool X86RegisterInfo::hasReservedSpillSlot(MachineFunction &MF, unsigned Reg,
                                           int &FrameIdx) const {
  if (Reg == FramePtr && hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

int
X86RegisterInfo::getFrameIndexOffset(MachineFunction &MF, int FI) const {
  const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - TFI.getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      Offset += SlotSize;
    } else {
      unsigned Align = MFI->getObjectAlignment(FI);
      assert((-(Offset + StackSize)) % Align == 0);
      Align = 0;
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
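      // Worked example (illustrative numbers): with StackAlign == 16 and
      // Amount == 20, the round-up below computes
      //   (20 + 16 - 1) / 16 * 16 == 32,
      // i.e. the next multiple of the stack alignment.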
      Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

      MachineInstr *New = 0;
      if (Old->getOpcode() == getCallFrameSetupOpcode()) {
        New = BuildMI(MF, Old->getDebugLoc(),
                      TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
                      StackPtr)
          .addReg(StackPtr)
          .addImm(Amount);
      } else {
        assert(Old->getOpcode() == getCallFrameDestroyOpcode());

        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;

        if (Amount) {
          unsigned Opc = (Amount < 128) ?
            (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
            (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
          New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
            .addReg(StackPtr)
            .addImm(Amount);
        }
      }

      if (New) {
        // The EFLAGS implicit def is dead.
        New->getOperand(3).setIsDead();

        // Replace the pseudo instruction with a new instruction.
        MBB.insert(I, New);
      }
    }
  } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = (CalleeAmt < 128) ?
        (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
        (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
      MachineInstr *Old = I;
      MachineInstr *New =
        BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
                StackPtr)
          .addReg(StackPtr)
          .addImm(CalleeAmt);

      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}

unsigned
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, int *Value,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else
    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register chosen above.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from the base register.
  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = getFrameIndexOffset(MF, FrameIndex) +
      (int)(MI.getOperand(i + 3).getImm());

    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = getFrameIndexOffset(MF, FrameIndex) +
      (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
  return 0;
}

void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Calculate and set the max stack object alignment early, so we can decide
  // whether we will need stack realignment (and thus FP).
  MFI->calculateMaxStackAlignment();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // Create the RETURNADDR area:
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           (-1U*SlotSize)+TailCallReturnAddrDelta,
                           true, false);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true, false);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    FrameIdx = 0;
  }
}

/// findDebugLoc - Find the next valid DebugLoc starting at MBBI, skipping
/// any DEBUG_VALUE instructions. Return UnknownLoc if there is none.
static DebugLoc findDebugLoc(MachineBasicBlock::iterator &MBBI,
                             MachineBasicBlock &MBB) {
  DebugLoc DL;
  if (MBBI != MBB.end()) {
    // Skip debug declarations, we don't want a DebugLoc from them.
    MachineBasicBlock::iterator MBBI2 = MBBI;
    while (MBBI2 != MBB.end() &&
           MBBI2->getOpcode() == TargetInstrInfo::DEBUG_VALUE)
      MBBI2++;
    if (MBBI2 != MBB.end())
      DL = MBBI2->getDebugLoc();
  }
  return DL;
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub
    ? ((Offset < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
    : ((Offset < 128) ?
       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = findDebugLoc(MBBI, MBB);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    Offset -= ThisVal;
  }
}

/// mergeSPUpdatesUp - Merge a stack-pointer adjustment found immediately
/// before the given iterator into NumBytes, deleting the merged instruction.
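/// For example (illustrative): if the instruction before MBBI is
/// 'sub esp, 16' and *NumBytes is 40, the sub is erased and *NumBytes
/// becomes 24, so the caller can emit one combined adjustment instead of two.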
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

/// mergeSPUpdatesDown - Merge a stack-pointer adjustment found immediately
/// after the given iterator into NumBytes, deleting the merged instruction.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // FIXME: THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = llvm::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Check the instruction before/after the passed
/// instruction. If it is an ADD/SUB of the stack pointer, it is deleted and
/// the stack adjustment is returned as a positive value for ADD and a
/// negative one for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}

void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                unsigned LabelId,
                                                unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  if (!MMI) return;

  // Add the callee-saved registers to the move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  bool HasFP = hasFP(MF);

  // Calculate the number of bytes used for storing the return address.
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  // FIXME: This is a dirty hack. The code itself is pretty messy right now.
  // It should be rewritten from scratch and generalized sometime.

  // Determine the maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    // another bug. I.e., one where we generate a prolog like this:
    //
    //   pushl %ebp
    //   movl %esp, %ebp
    //   pushl %ebp
    //   pushl %esi
    //   ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(LabelId, CSDst, CSSrc));
  }
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit the labels used by the
/// exception handler to generate the exception handling frames.
void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86Subtarget *Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
                         !Fn->doesNotThrow() || UnwindTablesMandatory;
  uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();   // Number of bytes to allocate.
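  // Note: on x86-64 the SysV ABI guarantees a 128-byte "red zone" below RSP
  // that leaf functions may use without moving the stack pointer. In the
  // red-zone check below, a leaf function with, say, 40 bytes of locals has
  // StackSize <= 128, so the allocation collapses to the callee-saved
  // minimum and RSP is left untouched.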
  bool HasFP = hasFP(MF);
  DebugLoc DL;

  // Add the RETADDR move area to the callee-saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64, the Red Zone is not disabled, and the function is a
  // leaf that uses at most 128 bytes of stack space and has no frame pointer,
  // calls, or dynamic allocas, then we do not need to adjust the stack
  // pointer (we fit in the Red Zone).
  if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
      !needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
      !MFI->hasCalls() &&                          // No calls.
      !Subtarget->isTargetWin64()) {               // Win64 has no Red Zone
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  } else if (Subtarget->isTargetWin64()) {
    // We need to always allocate 32 bytes as the register spill area.
    // FIXME: We might reuse these 32 bytes for leaf functions.
    StackSize += 32;
    MFI->setStackSize(StackSize);
  }

  // Insert a stack pointer adjustment for later moving of the return addr.
  // This only applies to tail call optimized functions where the callee
  // argument stack size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  uint64_t NumBytes = 0;
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  if (HasFP) {
    // Calculate the required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      unsigned FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);

      // Define the current CFA rule to use the provided offset.
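      // (At this point the CFA is SP plus 2 * SlotSize: the pushed return
      //  address plus the just-pushed EBP/RBP, hence the 2 * stackGrowth
      //  offset used below.)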
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      }

      // Change the rule for the FramePtr to be an "offset" rule.
      MachineLocation FPDst(MachineLocation::VirtualFP,
                            2 * stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);

    if (needsFrameMoves) {
      // Mark the effective beginning of when the frame pointer becomes valid.
      unsigned FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);

      // Define the current CFA to use the EBP/RBP register.
      MachineLocation FPDst(FramePtr);
      MachineLocation FPSrc(MachineLocation::VirtualFP);
      Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);

    // Realign the stack.
    if (needsStackRealignment(MF)) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL,
                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
                StackPtr).addReg(StackPtr).addImm(-MaxAlign);

      // The EFLAGS implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark the callee-saved push instruction.
      unsigned LabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);

      // Define the current CFA rule to use the provided offset.
      unsigned Ptr = StackSize ?
        MachineLocation::VirtualFP : StackPtr;
      MachineLocation SPDst(Ptr);
      MachineLocation SPSrc(Ptr, StackOffset);
      Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
      StackOffset += stackGrowth;
    }
  }

  DL = findDebugLoc(MBBI, MBB);

  // Adjust the stack pointer: ESP -= NumBytes.
  if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
    // Check whether EAX is live-in for this function.
    bool isEAXAlive = false;
    for (MachineRegisterInfo::livein_iterator
           II = MF.getRegInfo().livein_begin(),
           EE = MF.getRegInfo().livein_end(); (II != EE) && !isEAXAlive; ++II) {
      unsigned Reg = II->first;
      isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                    Reg == X86::AH || Reg == X86::AL);
    }

    // The function prologue calls _alloca to probe the stack when allocating
    // more than 4K bytes in one go. Touching the stack at 4K increments is
    // necessary to ensure that the guard pages used by the OS virtual memory
    // manager are allocated in the correct sequence.
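    // (The _alloca probe used here takes the allocation size in EAX and
    //  adjusts ESP itself, which is why EAX must be preserved around the
    //  call when it is live-in, as handled below.)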
    if (!isEAXAlive) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca");
    } else {
      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill);

      // Allocate NumBytes-4 bytes on the stack. We'll also use the 4 already
      // allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes - 4);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca");

      // Restore EAX
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    // If there is a SUB32ri of ESP immediately before this instruction, merge
    // the two. This can be the case when tail call elimination is enabled and
    // the callee has more arguments than the caller.
    NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

    // If there is an ADD32ri or SUB32ri of ESP immediately after this
    // instruction, merge the two instructions.
    mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

    if (NumBytes)
      emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
  }

  if ((NumBytes || PushedRegs) && needsFrameMoves) {
    // Mark the end of the stack pointer adjustment.
    unsigned LabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);

    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -StackSize + stackGrowth);
        Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
      }
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MF, LabelId, HasFP ? FramePtr : StackPtr);
  }
}

void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNri64:
  case X86::TCRETURNdi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm:
    break;  // These are ok.
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (hasFP(MF)) {
    // Calculate the required stack adjustment.
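    // Worked example (illustrative numbers): with StackSize == 72 and
    // SlotSize == 8, FrameSize is 64; if CSSize == 24 is freed by the
    // callee-saved pops, only NumBytes == 40 remains to deallocate here.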
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;

    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset ESP to point to the last
  // callee-saved slot before popping them off! The same applies when the
  // stack was realigned.
  if (needsStackRealignment(MF)) {
    // We cannot use LEA here, because the stack pointer was realigned. We
    // need to deallocate the local frame back.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI =
        addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                        FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else {
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust the stack pointer back: ESP += NumBytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
  }

  // We're returning from the function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64) {
    // Tail call return: adjust the stack pointer and jump to the callee.
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust the stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj - MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for a possible merge with the preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }

    // Jump to the label or to the value in a register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64)
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPd)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
    else if (RetOpcode == X86::TCRETURNri64)
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
    else
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1 * X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());

    // Check for a possible merge with the preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP   // Should have dwarf #16.
                 : X86::EIP;  // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

void
X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) const {
  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = (Is64Bit ? -8 : -4);

  // Initial state of the frame pointer is esp+4 (rsp+8 on x86-64).
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add the return address to the move list.
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI:
      case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"