X86RegisterInfo.cpp revision c3d505c3c2a8c0e1f1db572f47451cfe2a1a58a3
//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Compiler.h"
using namespace llvm;

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

// getDwarfRegNum - This function maps LLVM register identifiers to the
// DWARF-specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Not supported yet; just use the generic numbering as a quick fallback.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}
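// Note: the "N86" numbering used below is the 3-bit register field that goes
// into x86 ModR/M and SIB bytes. For example, %eax/%ax/%al all encode as 0
// and %ecx/%cx/%cl as 1. The x86-64 extended registers R8-R15 reuse the same
// 3-bit codes 0-7 and are distinguished by an extra bit in the REX prefix,
// which is why R8 maps to N86::EAX and R9 to N86::ECX in this function.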
// getX86RegNum - This function maps LLVM register identifiers to their X86
// specific numbering, which is used in various places encoding instructions.
//
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15: case X86::MM7:
    return 7;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    assert(0 && "Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

const TargetRegisterClass *X86RegisterInfo::getPointerRegClass() const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  if (Subtarget->is64Bit())
    return &X86::GR64RegClass;
  else
    return &X86::GR32RegClass;
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return NULL;
}
const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;

  if (MF) {
    const MachineFrameInfo *MFI = MF->getFrameInfo();
    const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
    callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
  }

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX, X86::RBP, X86::RDI, X86::RSI,
    X86::R12, X86::R13, X86::R14, X86::R15,
    X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}

const TargetRegisterClass* const*
X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
  bool callsEHReturn = false;

  if (MF) {
    const MachineFrameInfo *MFI = MF->getFrameInfo();
    const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
    callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
  }

  static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64EHRet[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegClassesWin64;
    else
      return (callsEHReturn ?
              CalleeSavedRegClasses64EHRet : CalleeSavedRegClasses64Bit);
  } else {
    return (callsEHReturn ?
            CalleeSavedRegClasses32EHRet : CalleeSavedRegClasses32Bit);
  }
}
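// Note: each register-class array above is meant to stay index-for-index
// parallel with the corresponding register array in getCalleeSavedRegs();
// e.g. on 32-bit targets ESI, EDI, EBX, and EBP each pair with GR32RegClass,
// and on Win64 the eight GPRs pair with GR64RegClass while XMM6-XMM15 pair
// with VR128RegClass. Consumers such as the prologue/epilogue inserter walk
// the two lists together, so the orderings must not drift apart.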
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave
  // normally with respect to liveness. We don't fully model the effects
  // of x87 stack pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

static unsigned calculateMaxStackAlignment(const MachineFrameInfo *FFI) {
  unsigned MaxAlign = 0;

  for (int i = FFI->getObjectIndexBegin(),
         e = FFI->getObjectIndexEnd(); i != e; ++i) {
    if (FFI->isDeadObjectIndex(i))
      continue;

    unsigned Align = FFI->getObjectAlignment(i);
    MaxAlign = std::max(MaxAlign, Align);
  }

  return MaxAlign;
}

// hasFP - Return true if the specified function should have a dedicated frame
// pointer register. This is true if the function has variable sized allocas
// or if frame pointer elimination is disabled.
//
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();

  return (NoFramePointerElim ||
          needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          (MMI && MMI->callsUnwindInit()));
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // FIXME: Currently we don't support stack realignment for functions with
  // variable-sized allocas.
  return (RealignStack &&
          (MFI->getMaxAlignment() > StackAlign &&
           !MFI->hasVarSizedObjects()));
}

bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

int
X86RegisterInfo::getFrameIndexOffset(MachineFunction &MF, int FI) const {
  int Offset = MF.getFrameInfo()->getObjectOffset(FI) + SlotSize;
  uint64_t StackSize = MF.getFrameInfo()->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0)
      // Skip the saved EBP.
      Offset += SlotSize;
    else {
      unsigned Align = MF.getFrameInfo()->getObjectAlignment(FI);
      assert((-(Offset + StackSize)) % Align == 0);
      Align = 0;  // Dead store to silence an unused-variable warning in
                  // release builds, where the assert compiles away.
      return Offset + StackSize;
    }

    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area.
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0) Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}
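// Worked example of the arithmetic above (32-bit, frame pointer present, no
// realignment, no tail-call delta): SlotSize is 4, the saved-EBP fixed object
// sits at raw offset -8, and the return address conceptually at -4. A local
// at raw offset -12 yields -12 + 4 (skip the return address) + 4 (skip the
// saved EBP) = -4, i.e. it is addressed as -4(%ebp); the saved-EBP slot
// itself comes out to 0(%ebp) and the return address to 4(%ebp), matching
// the standard x86 frame layout.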
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into an 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add.
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;

      MachineInstr *New = 0;
      if (Old->getOpcode() == getCallFrameSetupOpcode()) {
        New = BuildMI(MF, Old->getDebugLoc(),
                      TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
                      StackPtr).addReg(StackPtr).addImm(Amount);
      } else {
        assert(Old->getOpcode() == getCallFrameDestroyOpcode());

        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;
        if (Amount) {
          unsigned Opc = (Amount < 128) ?
            (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
            (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
          New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
            .addReg(StackPtr).addImm(Amount);
        }
      }

      if (New) {
        // The EFLAGS implicit def is dead.
        New->getOperand(3).setIsDead();

        // Replace the pseudo instruction with a new instruction...
        MBB.insert(I, New);
      }
    }
  } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = (CalleeAmt < 128) ?
        (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
        (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
      MachineInstr *Old = I;
      MachineInstr *New =
        BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
                StackPtr).addReg(StackPtr).addImm(CalleeAmt);

      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}
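// Example of the rewrite above: with StackAlign = 16, a call site needing 20
// bytes of outgoing arguments has its ADJCALLSTACKDOWN pseudo rounded up to
// (20 + 15) / 16 * 16 = 32 and lowered to "subl $32, %esp"; the matching
// ADJCALLSTACKUP becomes "addl $32, %esp", minus whatever the callee itself
// already popped.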
void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                          int SPAdj, RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();

  unsigned BasePtr;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else
    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register (EBP or ESP) and fold the frame
  // object's offset into the existing displacement.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = getFrameIndexOffset(MF, FrameIndex) +
      (int)(MI.getOperand(i+3).getImm());

    MI.getOperand(i+3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = getFrameIndexOffset(MF, FrameIndex) +
      (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}

void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *FFI = MF.getFrameInfo();

  // Calculate and set the max stack object alignment early, so we can decide
  // whether we will need stack realignment (and thus FP).
  unsigned MaxAlign = std::max(FFI->getMaxAlignment(),
                               calculateMaxStackAlignment(FFI));

  FFI->setMaxAlignment(MaxAlign);
}

void
X86RegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF) const{
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // Create the RETURNADDR area:
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MF.getFrameInfo()->
      CreateFixedObject(-TailCallReturnAddrDelta,
                        (-1*SlotSize)+TailCallReturnAddrDelta);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize,
                                                        (int)SlotSize * -2 +
                                                        TailCallReturnAddrDelta);
    assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    FrameIdx = 0;
  }
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub
    ? ((Offset < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
    : ((Offset < 128) ?
       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = (MBBI != MBB.end() ? MBBI->getDebugLoc() :
                 DebugLoc::getUnknownLoc());

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr).addImm(ThisVal);

    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
    Offset -= ThisVal;
  }
}
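// For instance, emitSPUpdate(MBB, MBBI, StackPtr, -24, false, TII) emits a
// single "subl $24, %esp" (SUB32ri8, since 24 < 128). The Chunk limit only
// matters for pathological 64-bit frames larger than 2GB, which get split
// into multiple adjustments by the loop above.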
// mergeSPUpdatesUp - Merge a stack-pointer adjustment immediately above the
// iterator into the running byte count, erasing the merged instruction.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

// mergeSPUpdatesDown - Merge a stack-pointer adjustment immediately below the
// iterator into the running byte count, erasing the merged instruction.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // This merging is currently disabled: the early return below makes the
  // rest of the function dead code.
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Check the instruction before/after the passed
/// instruction. If it is an ADD/SUB on the stack pointer, it is deleted and
/// the stack adjustment is returned as a positive value for ADD and a
/// negative one for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  int Offset = 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : next(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr){
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
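// Example: if the instruction just before MBBI is "subl $8, %esp",
// mergeSPUpdates(MBB, MBBI, StackPtr, true) erases it and returns -8;
// emitPrologue then folds that into its own allocation via
// "NumBytes -= mergeSPUpdates(...)", so one combined subtraction is emitted
// instead of two back-to-back adjustments.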
void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                unsigned LabelId,
                                                unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  if (!MMI) return;

  // Add callee saved registers to the move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  bool HasFP = hasFP(MF);

  // Calculate the number of bytes used for storing the return address.
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  // FIXME: This is a dirty hack. The code itself is a mess right now.
  // It should be rewritten from scratch and generalized someday.

  // Determine the maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame pointer.
    // This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact
    // that the frame pointer should have the value of the first "PUSH" when
    // it's trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering
    // up another bug. I.e., one where we generate a prolog like this:
    //
    //   pushl %ebp
    //   movl %esp, %ebp
    //   pushl %ebp
    //   pushl %esi
    //   ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(LabelId, CSDst, CSSrc));
  }
}

void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();   // Prologue goes in the entry BB.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86Subtarget *Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
                          !Fn->doesNotThrow() ||
                          UnwindTablesMandatory;
  bool HasFP = hasFP(MF);
  DebugLoc DL;

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();

  // Get the desired stack alignment.
  uint64_t MaxAlign = MFI->getMaxAlignment();
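  // In the common frame-pointer case the code below builds the familiar
  // sequence, e.g. on 32-bit x86:
  //   pushl %ebp
  //   movl  %esp, %ebp
  //   subl  $NumBytes, %esp
  // with DBG_LABELs and frame moves interleaved when unwind or debug info
  // is needed.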
  // Add the RETADDR move area to the callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() + (-TailCallReturnAddrDelta));

  // If this is x86-64, the Red Zone is not disabled, we are a leaf function
  // using at most 128 bytes of stack space, and we don't have a frame
  // pointer, calls, or dynamic alloca, then we do not need to adjust the
  // stack pointer (we fit in the Red Zone).
  bool DisableRedZone = Fn->hasFnAttr(Attribute::NoRedZone);
  if (Is64Bit && !DisableRedZone &&
      !needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
      !MFI->hasCalls() &&                          // No calls.
      !Subtarget->isTargetWin64()) {               // Win64 has no Red Zone.
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize,
                         StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  }

  // Insert a stack pointer adjustment for later moving of the return addr.
  // This only applies to tail call optimized functions where the callee
  // argument stack size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
              StackPtr).addReg(StackPtr).addImm(-TailCallReturnAddrDelta);

    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
  }

  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  uint64_t NumBytes = 0;
  if (HasFP) {
    // Calculate the required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot...
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill);

    if (needsFrameMoves) {
      // Mark the effective beginning of when the frame pointer becomes valid.
      unsigned FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);

      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              HasFP ? 2 * stackGrowth :
                                      -StackSize + stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP.
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      }

      // Change the rule for the FramePtr to be an "offset" rule.
      MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);

    if (needsFrameMoves) {
      unsigned FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);

      // Define the current CFA to use the EBP/RBP register.
      MachineLocation FPDst(FramePtr);
      MachineLocation FPSrc(MachineLocation::VirtualFP);
      Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);

    // Realign the stack.
    if (needsStackRealignment(MF)) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL,
                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
                StackPtr).addReg(StackPtr).addImm(-MaxAlign);

      // The EFLAGS implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool RegsSaved = false;
  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    RegsSaved = true;
    ++MBBI;
  }

  if (RegsSaved && needsFrameMoves) {
    // Mark the end of the callee-saved push instructions.
    unsigned LabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    emitCalleeSavedFrameMoves(MF, LabelId, FramePtr);
  }

  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
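  // On Cygwin/MinGW, large allocations go through _alloca, which here takes
  // the byte count in EAX and performs the stack adjustment itself while
  // touching each 4K page, e.g.:
  //   movl $NumBytes, %eax
  //   call _alloca
  // This is why the code below cares about whether EAX is live-in to the
  // function.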
  // Adjust the stack pointer: ESP -= NumBytes.
  if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
    // Check whether EAX is live-in for this function.
    bool isEAXAlive = false;
    for (MachineRegisterInfo::livein_iterator
           II = MF.getRegInfo().livein_begin(),
           EE = MF.getRegInfo().livein_end(); (II != EE) && !isEAXAlive; ++II) {
      unsigned Reg = II->first;
      isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                    Reg == X86::AH || Reg == X86::AL);
    }

    // The function prologue calls _alloca to probe the stack when allocating
    // more than 4K bytes in one go. Touching the stack at 4K increments is
    // necessary to ensure that the guard pages used by the OS virtual memory
    // manager are allocated in the correct sequence.
    if (!isEAXAlive) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca");
    } else {
      // Save EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill);

      // Allocate NumBytes-4 bytes on the stack. We'll also use the 4 already
      // allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes - 4);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca");

      // Restore EAX.
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    // If there is a SUB32ri of ESP immediately before this instruction,
    // merge the two. This can be the case when tail call elimination is
    // enabled and the callee has more arguments than the caller.
    NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

    // If there is an ADD32ri or SUB32ri of ESP immediately after this
    // instruction, merge the two instructions.
    mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

    if (NumBytes)
      emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
  }
}
void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();

  switch (RetOpcode) {
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNri64:
  case X86::TCRETURNdi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm: break;  // These are ok.
  default:
    assert(0 && "Can only insert epilog into returning blocks");
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (hasFP(MF)) {
    // Calculate the required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();
    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;
    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset ESP to point to the last
  // callee-saved slot before popping them off! The same applies when the
  // stack was realigned.
  if (needsStackRealignment(MF)) {
    // We cannot use LEA here, because the stack pointer was realigned. We
    // need to deallocate the local frame first.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI = addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                                         FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else
      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
              StackPtr).addReg(FramePtr);
  } else {
    // Adjust the stack pointer back: ESP += NumBytes.
    if (NumBytes)
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
  }

  // We're returning from the function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  // Tail call return: adjust the stack pointer and jump to the callee.
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64) {
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust the stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj - MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for a possible merge with the preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }

    // Jump to a label or to a value in a register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64)
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPd)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
    else if (RetOpcode == X86::TCRETURNri64)
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
    else
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1 * X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());

    // Check for a possible merge with the preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  if (Is64Bit)
    return X86::RIP;  // Should have DWARF #16.
  else
    return X86::EIP;  // Should have DWARF #8.
}

unsigned X86RegisterInfo::getFrameRegister(MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}
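// The initial DWARF frame state below says that at function entry the
// canonical frame address (CFA) is the stack pointer plus one slot (esp+4
// on 32-bit, rsp+8 on 64-bit) and that the return address lives in the slot
// just below the CFA; both moves are expressed with stackGrowth, which is
// negative because the x86 stack grows down.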
void X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves)
                                                                       const {
  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = (Is64Bit ? -8 : -4);

  // The initial state of the frame pointer is esp+4.
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add the return address to the move list.
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  assert(0 && "What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  assert(0 && "What is the exception handler register");
  return 0;
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT VT, bool High) {
  switch (VT.getSimpleVT()) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"

namespace {
  struct VISIBILITY_HIDDEN MSAC : public MachineFunctionPass {
    static char ID;
    MSAC() : MachineFunctionPass(&ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      MachineFrameInfo *FFI = MF.getFrameInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();

      // Calculate the max stack alignment of all already allocated stack
      // objects.
      unsigned MaxAlign = calculateMaxStackAlignment(FFI);

      // Be over-conservative: scan all vreg defs to see whether vector
      // registers are used. If so, there is a chance that a vector register
      // will be spilled, and the stack then needs to be aligned properly.
      for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
           RegNum < RI.getLastVirtReg(); ++RegNum)
        MaxAlign = std::max(MaxAlign, RI.getRegClass(RegNum)->getAlignment());

      FFI->setMaxAlignment(MaxAlign);

      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Calculator";
    }
  };

  char MSAC::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentCalculatorPass() { return new MSAC(); }
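// Presumably the X86 target schedules the pass above itself (this file does
// not show the call site), roughly as in X86TargetMachine:
//
//   PM.add(createX86MaxStackAlignmentCalculatorPass());
//
// so that the maximal stack alignment is known before register allocation
// and frame finalization run.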