X86RegisterInfo.cpp revision 3752d2fe34a8d576203f8034baeabb02e65c87bc

//===- X86RegisterInfo.cpp - X86 Register Information ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Not supported yet; just use the generic numbering as a fallback.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}

/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
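// For example, X86::R9D maps to N86::ECX (encoding 1) just like X86::ECX
// does: the extended registers R8-R15 reuse the three low encoding bits of
// the legacy registers, and the instruction encoder distinguishes the two
// banks with a REX prefix bit.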
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch (RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15: case X86::MM7:
    return 7;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case 1:
    // 8-bit
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case 2:
    // 8-bit hi
    if (B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case 3:
    // 16-bit
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    }
    break;
  case 4:
    // 32-bit
    if (B == &X86::GR32RegClass || B == &X86::GR32_NOSPRegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  }
  return 0;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
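    // The SIB byte cannot encode ESP/RSP as an index register (index field
    // 0b100 means "no index"), so a pointer that may land in the index slot
    // of a memory operand cannot be allocated to the stack pointer - the
    // likely "encoding reason" behind these _NOSP classes.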
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return NULL;
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;

  if (MF) {
    const MachineFrameInfo *MFI = MF->getFrameInfo();
    const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
    callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
  }

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX, X86::RBP, X86::RDI, X86::RSI,
    X86::R12, X86::R13, X86::R14, X86::R15,
    X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}

const TargetRegisterClass* const*
X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
  bool callsEHReturn = false;

  if (MF) {
    const MachineFrameInfo *MFI = MF->getFrameInfo();
    const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
    callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
  }

  static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64EHRet[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegClassesWin64;
    else
      return (callsEHReturn ?
              CalleeSavedRegClasses64EHRet : CalleeSavedRegClasses64Bit);
  } else {
    return (callsEHReturn ?
            CalleeSavedRegClasses32EHRet : CalleeSavedRegClasses32Bit);
  }
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
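/// For instance, a function that contains a variable-sized alloca, e.g.
///   void f(unsigned n) { char *p = (char*)alloca(n); /* ... */ }
/// has MFI->hasVarSizedObjects() set, so hasFP() returns true and a dedicated
/// frame pointer is kept.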
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();

  return (NoFramePointerElim ||
          needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          (MMI && MMI->callsUnwindInit()));
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  bool requiresRealignment =
    RealignStack && (MFI->getMaxAlignment() > StackAlign);

  // FIXME: Currently we don't support stack realignment for functions with
  //        variable-sized allocas.
  // FIXME: The error is temporarily disabled - it seems to be too conservative.
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    llvm_report_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  return (requiresRealignment && !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

bool X86RegisterInfo::hasReservedSpillSlot(MachineFunction &MF, unsigned Reg,
                                           int &FrameIdx) const {
  if (Reg == FramePtr && hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

int
X86RegisterInfo::getFrameIndexOffset(MachineFunction &MF, int FI) const {
  const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - TFI.getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      Offset += SlotSize;
    } else {
      unsigned Align = MFI->getObjectAlignment(FI);
      assert((-(Offset + StackSize)) % Align == 0);
      Align = 0;
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area.
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add.
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

      MachineInstr *New = 0;
      if (Old->getOpcode() == getCallFrameSetupOpcode()) {
        New = BuildMI(MF, Old->getDebugLoc(),
                      TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
                      StackPtr)
          .addReg(StackPtr)
          .addImm(Amount);
      } else {
        assert(Old->getOpcode() == getCallFrameDestroyOpcode());

        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;

        if (Amount) {
          unsigned Opc = (Amount < 128) ?
            (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
            (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
          New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
            .addReg(StackPtr)
            .addImm(Amount);
        }
      }

      if (New) {
        // The EFLAGS implicit def is dead.
        New->getOperand(3).setIsDead();

        // Replace the pseudo instruction with a new instruction.
        MBB.insert(I, New);
      }
    }
  } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = (CalleeAmt < 128) ?
        (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
        (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
      MachineInstr *Old = I;
      MachineInstr *New =
        BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
                StackPtr)
          .addReg(StackPtr)
          .addImm(CalleeAmt);

      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}

unsigned
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, int *Value,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else
    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register (EBP or ESP/RSP), and add the frame
  // object's offset to the existing offset.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = getFrameIndexOffset(MF, FrameIndex) +
      (int)(MI.getOperand(i + 3).getImm());

    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = getFrameIndexOffset(MF, FrameIndex) +
      (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
  return 0;
}

void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Calculate and set the max stack object alignment early, so we can decide
  // whether we will need stack realignment (and thus FP).
  MFI->calculateMaxStackAlignment();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // Create the RETURNADDR area:
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           (-1U*SlotSize)+TailCallReturnAddrDelta,
                           true, false);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true, false);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    FrameIdx = 0;
  }
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub
    ? ((Offset < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
    : ((Offset < 128) ?
       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = (MBBI != MBB.end() ? MBBI->getDebugLoc() :
                 DebugLoc::getUnknownLoc());

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    Offset -= ThisVal;
  }
}

/// mergeSPUpdatesUp - Merge a stack-manipulating instruction immediately
/// above the iterator into it.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

/// mergeSPUpdatesDown - Merge a stack-manipulating instruction immediately
/// below the iterator into it.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // FIXME: THIS ISN'T RUN!!!
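  // The unconditional return below disables this merge entirely, so the code
  // that follows it is dead until the FIXME above is resolved.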
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = llvm::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Checks the instruction before/after the passed
/// instruction. If it is an ADD/SUB instruction, it is deleted and the stack
/// adjustment is returned as a positive value for ADD and a negative one for
/// SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}

void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                unsigned LabelId,
                                                unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  if (!MMI) return;

  // Add the callee saved registers to the move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  bool HasFP = hasFP(MF);

  // Calculate the amount of bytes used for storing the return address.
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  // FIXME: This is a dirty hack. The code itself is a mess right now. It
  //        should be rewritten from scratch and generalized sometime.

  // Determine the maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate the offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    // another bug. I.e., one where we generate a prolog like this:
    //
    //     pushl  %ebp
    //     movl   %esp, %ebp
    //     pushl  %ebp
    //     pushl  %esi
    //     ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(LabelId, CSDst, CSSrc));
  }
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86Subtarget *Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
                          !Fn->doesNotThrow() || UnwindTablesMandatory;
  uint64_t MaxAlign  = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  DebugLoc DL;

  // Add the RETADDR move area to the callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64, the Red Zone is not disabled, we are a leaf function,
  // we use up to 128 bytes of stack space, and we don't have a frame pointer,
  // calls, or dynamic allocas, then we do not need to adjust the stack
  // pointer (we fit in the Red Zone).
  if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
      !needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
      !MFI->hasCalls() &&                          // No calls.
      !Subtarget->isTargetWin64()) {               // Win64 has no Red Zone.
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  } else if (Subtarget->isTargetWin64()) {
    // We always need to allocate 32 bytes as the register spill area.
    // FIXME: We might be able to reuse these 32 bytes for leaf functions.
    StackSize += 32;
    MFI->setStackSize(StackSize);
  }

  // Insert a stack pointer adjustment for later moving of the return addr.
  // Only applies to tail call optimized functions where the callee argument
  // stack size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  uint64_t NumBytes = 0;
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  if (HasFP) {
    // Calculate the required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      unsigned FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);

      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      }

      // Change the rule for the FramePtr to be an "offset" rule.
      MachineLocation FPDst(MachineLocation::VirtualFP,
                            2 * stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);

    if (needsFrameMoves) {
      // Mark the effective beginning of when the frame pointer becomes valid.
      unsigned FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);

      // Define the current CFA to use the EBP/RBP register.
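      // Per the mapping table above (SRC: VirtualFP, DST: Register), this
      // move becomes DW_CFA_def_cfa_register: from this label onward the
      // unwinder computes the CFA relative to EBP/RBP rather than the still
      // moving stack pointer.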
      MachineLocation FPDst(FramePtr);
      MachineLocation FPSrc(MachineLocation::VirtualFP);
      Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);

    // Realign the stack.
    if (needsStackRealignment(MF)) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL,
                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
                StackPtr).addReg(StackPtr).addImm(-MaxAlign);

      // The EFLAGS implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark the callee-saved push instruction.
      unsigned LabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);

      // Define the current CFA rule to use the provided offset.
      unsigned Ptr = StackSize ?
        MachineLocation::VirtualFP : StackPtr;
      MachineLocation SPDst(Ptr);
      MachineLocation SPSrc(Ptr, StackOffset);
      Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
      StackOffset += stackGrowth;
    }
  }

  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  // Adjust the stack pointer: ESP -= numbytes.
  if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
    // Check whether EAX is live-in for this function.
    bool isEAXAlive = false;
    for (MachineRegisterInfo::livein_iterator
           II = MF.getRegInfo().livein_begin(),
           EE = MF.getRegInfo().livein_end(); (II != EE) && !isEAXAlive; ++II) {
      unsigned Reg = II->first;
      isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                    Reg == X86::AH || Reg == X86::AL);
    }

    // The function prologue calls _alloca to probe the stack when allocating
    // more than 4k bytes in one go. Touching the stack at 4K increments is
    // necessary to ensure that the guard pages used by the OS virtual memory
    // manager are allocated in the correct sequence.
    if (!isEAXAlive) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca");
    } else {
      // Save EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill);

      // Allocate NumBytes-4 bytes on the stack. We'll also use 4 already
      // allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes - 4);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca");

      // Restore EAX.
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    // If there is a SUB32ri of ESP immediately before this instruction, merge
    // the two. This can be the case when tail call elimination is enabled and
    // the callee has more arguments than the caller.
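    // For example, a preceding "subl $16, %esp" would be erased here and
    // mergeSPUpdates would return -16; NumBytes then grows by 16, so the
    // single update emitted below allocates the combined amount.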
    NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

    // If there is an ADD32ri or SUB32ri of ESP immediately after this
    // instruction, merge the two instructions.
    mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

    if (NumBytes)
      emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
  }

  if ((NumBytes || PushedRegs) && needsFrameMoves) {
    // Mark the end of the stack pointer adjustment.
    unsigned LabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);

    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -StackSize + stackGrowth);
        Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
      }
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MF, LabelId, HasFP ? FramePtr : StackPtr);
  }
}

void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNri64:
  case X86::TCRETURNdi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm:
    break;  // These are ok.
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign  = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (hasFP(MF)) {
    // Calculate the required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;

    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset esp to point to the last callee-saved
  // slot before popping them off! The same applies when the stack was
  // realigned.
  if (needsStackRealignment(MF)) {
    // We cannot use LEA here, because the stack pointer was realigned. We
    // need to deallocate the local frame first.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI =
        addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                        FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else {
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust the stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
  }

  // We're returning from a function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64) {
    // Tail call return: adjust the stack pointer and jump to the callee.
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust the stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj-MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for a possible merge with the preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }

    // Jump to the label or the value in the register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64)
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPd)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
    else if (RetOpcode == X86::TCRETURNri64)
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
    else
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());

    // Check for a possible merge with the preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP   // Should have dwarf #16.
                 : X86::EIP;  // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

void
X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) const {
  // Calculate the amount of bytes used for storing the return address.
  int stackGrowth = (Is64Bit ? -8 : -4);

  // Initial state of the frame pointer is esp+stackGrowth.
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add the return address to the move list.
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"