X86FrameLowering.cpp revision 84d518af1991f581b748c4d11dbeb1c54573556b
//=======- X86FrameLowering.cpp - X86 Frame Information --------*- C++ -*-====//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/SmallSet.h"

using namespace llvm;

// FIXME: completely move here.
extern cl::opt<bool> ForceStackAlign;

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();
  const TargetRegisterInfo *RI = TM.getRegisterInfo();

  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          RI->needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit());
}

static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worrying about clobbering it.
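/// For example, when emitSPUpdate below replaces the final 4- or 8-byte stack
/// adjustment of an epilogue with a single pop, it pops into the register
/// returned here (e.g. "popl %eax" rather than "addl $4, %esp"), provided the
/// return instruction does not use that register.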
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MBBI,
                                       const TargetRegisterInfo &TRI,
                                       bool Is64Bit) {
  const MachineFunction *MF = MBB.getParent();
  const Function *F = MF->getFunction();
  if (!F || MF->getMMI().callsEHReturn())
    return 0;

  static const unsigned CallerSavedRegs32Bit[] = {
    X86::EAX, X86::EDX, X86::ECX, 0
  };

  static const unsigned CallerSavedRegs64Bit[] = {
    X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
    X86::R8,  X86::R9,  X86::R10, X86::R11, 0
  };

  unsigned Opc = MBBI->getOpcode();
  switch (Opc) {
  default: return 0;
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<unsigned, 8> Uses;
    for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MBBI->getOperand(i);
      if (!MO.isReg() || MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      for (const unsigned *AsI = TRI.getOverlaps(Reg); *AsI; ++AsI)
        Uses.insert(*AsI);
    }

    const unsigned *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
    for (; *CS; ++CS)
      if (!Uses.count(*CS))
        return *CS;
  }
  }

  return 0;
}


/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes,
                  bool Is64Bit, const TargetInstrInfo &TII,
                  const TargetRegisterInfo &TRI) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub ?
    getSUBriOpcode(Is64Bit, Offset) :
    getADDriOpcode(Is64Bit, Offset);
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    if (ThisVal == (Is64Bit ? 8 : 4)) {
      // Use push / pop instead.
      unsigned Reg = isSub
        ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
        : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
      if (Reg) {
        Opc = isSub
          ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
          : (Is64Bit ? X86::POP64r : X86::POP32r);
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
          .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
        if (isSub)
          MI->setFlag(MachineInstr::FrameSetup);
        Offset -= ThisVal;
        continue;
      }
    }

    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr)
      .addImm(ThisVal);
    if (isSub)
      MI->setFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    Offset -= ThisVal;
  }
}

/// mergeSPUpdatesUp - If the instruction immediately above the iterator is an
/// ADD or SUB of the stack pointer, erase it and fold its immediate into
/// *NumBytes.
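/// For example, an "addl $16, %esp" immediately above the iterator is erased
/// and 16 is added to *NumBytes, so the caller can emit one combined
/// adjustment instead of two.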
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

/// mergeSPUpdatesDown - If the instruction immediately below the iterator is
/// an ADD or SUB of the stack pointer, erase it and fold its immediate into
/// *NumBytes.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // FIXME: THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = llvm::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Check the instruction before/after the passed instruction.
/// If it is an ADD/SUB of the stack pointer, it is deleted and the stack
/// adjustment is returned as a positive value for ADD and a negative one for
/// SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr){
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}

static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;

    if (Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                 MCSymbol *Label,
                                                 unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = TM.getTargetData();
  bool HasFP = hasFP(MF);

  // Calculate the number of bytes used to store the return address.
  int stackGrowth = -TD->getPointerSize();

  // FIXME: This is a dirty hack. The code itself is a mess right now. It
  // should be rewritten from scratch and generalized at some point.

  // Determine maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    // another bug. I.e., one where we generate a prolog like this:
    //
    //     pushl  %ebp
    //     movl   %esp, %ebp
    //     pushl  %ebp
    //     pushl  %esi
    //     ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(Label, CSDst, CSSrc));
  }
}

/// getCompactUnwindRegNum - Get the compact unwind number for a given
/// register. The number corresponds to the enum lists in
/// compact_unwind_encoding.h.
static int getCompactUnwindRegNum(const unsigned *CURegs, unsigned Reg) {
  int Idx = 1;
  for (; *CURegs; ++CURegs, ++Idx)
    if (*CURegs == Reg)
      return Idx;

  return -1;
}

// Number of registers that can be saved in a compact unwind encoding.
#define CU_NUM_SAVED_REGS 6

/// encodeCompactUnwindRegistersWithoutFrame - Create the permutation encoding
/// used with frameless stacks. It is passed the number of registers to be saved
/// and an array of the registers saved.
static uint32_t
encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS],
                                         unsigned RegCount, bool Is64Bit) {
  // The saved registers are numbered from 1 to 6. In order to encode the order
  // in which they were saved, we re-number them according to their place in the
  // register order. The re-numbering is relative to the last re-numbered
  // register. E.g., if we have registers {6, 2, 4, 5} saved in that order:
  //
  //    Orig  Re-Num
  //    ----  ------
  //     6       6
  //     2       2
  //     4       3
  //     5       3
  //
  static const unsigned CU32BitRegs[] = {
    X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
  };
  static const unsigned CU64BitRegs[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };
  const unsigned *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs);

  for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i) {
    int CUReg = getCompactUnwindRegNum(CURegs, SavedRegs[i]);
    if (CUReg == -1) return ~0U;
    SavedRegs[i] = CUReg;
  }

  uint32_t RenumRegs[CU_NUM_SAVED_REGS];
  for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i) {
    unsigned Countless = 0;
    for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
      if (SavedRegs[j] < SavedRegs[i])
        ++Countless;

    RenumRegs[i] = SavedRegs[i] - Countless - 1;
  }

  // Take the renumbered values and encode them into a 10-bit number.
  uint32_t permutationEncoding = 0;
  switch (RegCount) {
  case 6:
    permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                           + 6 * RenumRegs[2] +  2 * RenumRegs[3]
                           +     RenumRegs[4];
    break;
  case 5:
    permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                           + 6 * RenumRegs[3] +  2 * RenumRegs[4]
                           +     RenumRegs[5];
    break;
  case 4:
    permutationEncoding |=  60 * RenumRegs[2] + 12 * RenumRegs[3]
                           + 3 * RenumRegs[4] +      RenumRegs[5];
    break;
  case 3:
    permutationEncoding |=  20 * RenumRegs[3] +  4 * RenumRegs[4]
                           +     RenumRegs[5];
    break;
  case 2:
    permutationEncoding |=   5 * RenumRegs[4] +      RenumRegs[5];
    break;
  case 1:
    permutationEncoding |=       RenumRegs[5];
    break;
  }

  assert((permutationEncoding & 0x3FF) == permutationEncoding &&
         "Invalid compact register encoding!");
  return permutationEncoding;
}

/// encodeCompactUnwindRegistersWithFrame - Return the registers encoded for a
/// compact encoding with a frame pointer.
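/// For example, if the prologue pushes %rbx and then %r12 after setting up the
/// frame pointer, RBX (compact unwind number 1) is placed in bits 0-2 and R12
/// (number 2) in bits 3-5, giving an encoding of 0x11.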
static uint32_t
encodeCompactUnwindRegistersWithFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS],
                                      bool Is64Bit) {
  static const unsigned CU32BitRegs[] = {
    X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
  };
  static const unsigned CU64BitRegs[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };
  const unsigned *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs);

  // Encode the registers in the order they were saved, 3-bits per register. The
  // registers are numbered from 1 to 6.
  uint32_t RegEnc = 0;
  for (int I = 5; I >= 0; --I) {
    unsigned Reg = SavedRegs[I];
    if (Reg == 0) break;
    int CURegNum = getCompactUnwindRegNum(CURegs, Reg);
    if (CURegNum == -1)
      return ~0U;

    // Encode the 3-bit register number in order, skipping over 3-bits for each
    // register.
    RegEnc |= (CURegNum & 0x7) << ((5 - I) * 3);
  }

  assert((RegEnc & 0x7FFF) == RegEnc && "Invalid compact register encoding!");
  return RegEnc;
}

uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const {
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();

  bool Is64Bit = STI.is64Bit();
  bool HasFP = hasFP(MF);

  unsigned SavedRegs[CU_NUM_SAVED_REGS] = { 0, 0, 0, 0, 0, 0 };
  int SavedRegIdx = CU_NUM_SAVED_REGS;

  unsigned OffsetSize = (Is64Bit ? 8 : 4);

  unsigned PushInstr = (Is64Bit ? X86::PUSH64r : X86::PUSH32r);
  unsigned PushInstrSize = 1;
  unsigned MoveInstr = (Is64Bit ? X86::MOV64rr : X86::MOV32rr);
  unsigned MoveInstrSize = (Is64Bit ? 3 : 2);
  unsigned SubtractInstrIdx = (Is64Bit ? 3 : 2);

  unsigned StackDivide = (Is64Bit ? 8 : 4);

  unsigned InstrOffset = 0;
  unsigned StackAdjust = 0;
  unsigned StackSize = 0;

  MachineBasicBlock &MBB = MF.front(); // Prologue is in entry BB.
  bool ExpectEnd = false;
  for (MachineBasicBlock::iterator
         MBBI = MBB.begin(), MBBE = MBB.end(); MBBI != MBBE; ++MBBI) {
    MachineInstr &MI = *MBBI;
    unsigned Opc = MI.getOpcode();
    if (Opc == X86::PROLOG_LABEL) continue;
    if (!MI.getFlag(MachineInstr::FrameSetup)) break;

    // We don't expect any more prologue instructions.
    if (ExpectEnd) return 0;

    if (Opc == PushInstr) {
      // If there are too many saved registers, we cannot use compact encoding.
      if (--SavedRegIdx < 0) return 0;

      SavedRegs[SavedRegIdx] = MI.getOperand(0).getReg();
      StackAdjust += OffsetSize;
      InstrOffset += PushInstrSize;
    } else if (Opc == MoveInstr) {
      unsigned SrcReg = MI.getOperand(1).getReg();
      unsigned DstReg = MI.getOperand(0).getReg();

      if (DstReg != FramePtr || SrcReg != StackPtr)
        return 0;

      StackAdjust = 0;
      memset(SavedRegs, 0, sizeof(SavedRegs));
      SavedRegIdx = CU_NUM_SAVED_REGS;
      InstrOffset += MoveInstrSize;
    } else if (Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
               Opc == X86::SUB32ri || Opc == X86::SUB32ri8) {
      if (StackSize)
        // We already have a stack size.
        return 0;

      if (!MI.getOperand(0).isReg() ||
          MI.getOperand(0).getReg() != MI.getOperand(1).getReg() ||
          MI.getOperand(0).getReg() != StackPtr || !MI.getOperand(2).isImm())
        // We need this to be a stack adjustment pointer.
        // Something like:
        //
        //   %RSP<def> = SUB64ri8 %RSP, 48
        return 0;

      StackSize = MI.getOperand(2).getImm() / StackDivide;
      SubtractInstrIdx += InstrOffset;
      ExpectEnd = true;
    }
  }

  // Encode that we are using EBP/RBP as the frame pointer.
  uint32_t CompactUnwindEncoding = 0;
  StackAdjust /= StackDivide;
  if (HasFP) {
    if ((StackAdjust & 0xFF) != StackAdjust)
      // Offset was too big for compact encoding.
      return 0;

    // Get the encoding of the saved registers when we have a frame pointer.
    uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame(SavedRegs, Is64Bit);
    if (RegEnc == ~0U) return 0;

    CompactUnwindEncoding |= 0x01000000;
    CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
    CompactUnwindEncoding |= RegEnc & 0x7FFF;
  } else {
    uint32_t TotalStackSize = StackAdjust + StackSize;
    if ((TotalStackSize & 0xFF) == TotalStackSize) {
      // Frameless stack with a small stack size.
      CompactUnwindEncoding |= 0x02000000;

      // Encode the stack size.
      CompactUnwindEncoding |= (TotalStackSize & 0xFF) << 16;
    } else {
      if ((StackAdjust & 0x7) != StackAdjust)
        // The extra stack adjustments are too big for us to handle.
        return 0;

      // Frameless stack with an offset too large for us to encode compactly.
      CompactUnwindEncoding |= 0x03000000;

      // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
      // instruction.
      CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;

      // Encode any extra stack adjustments (done via push instructions).
      CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
    }

    // Encode the number of registers saved.
    CompactUnwindEncoding |= ((CU_NUM_SAVED_REGS - SavedRegIdx) & 0x7) << 10;

    // Get the encoding of the saved registers when we don't have a frame
    // pointer.
    uint32_t RegEnc =
      encodeCompactUnwindRegistersWithoutFrame(SavedRegs,
                                               CU_NUM_SAVED_REGS - SavedRegIdx,
                                               Is64Bit);
    if (RegEnc == ~0U) return 0;

    // Encode the register encoding.
    CompactUnwindEncoding |= RegEnc & 0x3FF;
  }

  return CompactUnwindEncoding;
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() ||
    Fn->needsUnwindTableEntry();
  uint64_t MaxAlign  = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  bool Is64Bit = STI.is64Bit();
  bool IsWin64 = STI.isTargetWin64();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();
  DebugLoc DL;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out.  Otherwise just make sure we have some alignment - we'll
  // go with the minimum SlotSize.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, use up to 128 bytes of stack space, and don't have a frame
  // pointer, calls, or dynamic allocas, then we do not need to adjust the
  // stack pointer (we fit in the Red Zone).
  if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
      !RegInfo->needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                     // No dynamic alloca.
      !MFI->adjustsStack() &&                           // No calls.
      !IsWin64 &&                                       // Win64 has no Red Zone
      !MF.getTarget().Options.EnableSegmentedStacks) {  // Regular stack
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  }

  // Insert stack pointer adjustment for later moving of return addr.  Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(getSUBriOpcode(Is64Bit, -TailCallReturnAddrDelta)),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta)
        .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  uint64_t NumBytes = 0;
  int stackGrowth = -TD->getPointerSize();

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
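    // Illustrative shape of the frame-pointer setup emitted here and just
    // below (the exact registers depend on the target), e.g.:
    //
    //   pushq %rbp
    //   movq  %rsp, %rbp
    //
    // followed, when required, by an AND that realigns the stack and by the
    // main stack-pointer adjustment emitted further down.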
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
        .addSym(FrameLabel);

      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      } else {
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      }

      // Change the rule for the FramePtr to be an "offset" rule.
      MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Update EBP with the new base value.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);

    if (needsFrameMoves) {
      // Mark effective beginning of when frame pointer becomes valid.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
        .addSym(FrameLabel);

      // Define the current CFA to use the EBP/RBP register.
      MachineLocation FPDst(FramePtr);
      MachineLocation FPSrc(MachineLocation::VirtualFP);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);

    // Realign stack
    if (RegInfo->needsStackRealignment(MF)) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL,
                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri), StackPtr)
          .addReg(StackPtr)
          .addImm(-MaxAlign)
          .setMIFlag(MachineInstr::FrameSetup);

      // The EFLAGS implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    MBBI->setFlag(MachineInstr::FrameSetup);
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark callee-saved push instruction.
      MCSymbol *Label = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);

      // Define the current CFA rule to use the provided offset.
      unsigned Ptr = StackSize ? MachineLocation::VirtualFP : StackPtr;
      MachineLocation SPDst(Ptr);
      MachineLocation SPSrc(Ptr, StackOffset);
      Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      StackOffset += stackGrowth;
    }
  }

  DL = MBB.findDebugLoc(MBBI);

  // If there is a SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
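  // For example, a "subl $12, %esp" emitted above for the tail-call
  // return-address move would be erased here and its 12 bytes folded into
  // NumBytes, so only one combined adjustment is emitted below.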
  NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

  // If there is an ADD32ri or SUB32ri of ESP immediately after this
  // instruction, merge the two instructions.
  mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

  // Adjust stack pointer: ESP -= numbytes.

  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack.  Windows uses __chkstk and cygwin/mingw
  // uses __alloca.  __alloca and the 32-bit version of __chkstk will probe the
  // stack and adjust the stack pointer in one go.  The 64-bit version of
  // __chkstk is only responsible for probing the stack.  The 64-bit prologue is
  // responsible for adjusting the stack pointer.  Touching the stack at 4K
  // increments is necessary to ensure that the guard pages used by the OS
  // virtual memory manager are allocated in correct sequence.
  if (NumBytes >= 4096 && STI.isTargetCOFF() && !STI.isTargetEnvMacho()) {
    const char *StackProbeSymbol;
    bool isSPUpdateNeeded = false;

    if (Is64Bit) {
      if (STI.isTargetCygMing())
        StackProbeSymbol = "___chkstk";
      else {
        StackProbeSymbol = "__chkstk";
        isSPUpdateNeeded = true;
      }
    } else if (STI.isTargetCygMing())
      StackProbeSymbol = "_alloca";
    else
      StackProbeSymbol = "_chkstk";

    // Check whether EAX is livein for this function.
    bool isEAXAlive = isEAXLiveIn(MF);

    if (isEAXAlive) {
      // Sanity check that we aren't in 64-bit mode; EAX should never be
      // live-in there, so throw an assert.
      assert(!Is64Bit && "EAX is livein in x64 case!");

      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (Is64Bit) {
      // Handle the 64-bit Windows ABI case where we need to call __chkstk.
      // Function prologue is responsible for adjusting the stack pointer.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
        .addImm(NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    } else {
      // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
      // We'll also use 4 already allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::W64ALLOCA : X86::CALLpcrel32))
      .addExternalSymbol(StackProbeSymbol)
      .addReg(StackPtr,    RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit)
      .setMIFlag(MachineInstr::FrameSetup);

    // MSVC x64's __chkstk needs to adjust %rsp.
    // FIXME: %rax preserves the offset and should be available.
    if (isSPUpdateNeeded)
      emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit,
                   TII, *RegInfo);

    if (isEAXAlive) {
      // Restore EAX
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MI->setFlag(MachineInstr::FrameSetup);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes)
    emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit,
                 TII, *RegInfo);

  if (((!HasFP && NumBytes) || PushedRegs) && needsFrameMoves) {
    // Mark end of stack pointer adjustment.
    MCSymbol *Label = MMI.getContext().CreateTempSymbol();
    BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
      .addSym(Label);

    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -StackSize + stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      } else {
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      }
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
  }

  // Darwin 10.7 and greater has support for compact unwind encoding.
  if (STI.getTargetTriple().isMacOSX() &&
      !STI.getTargetTriple().isMacOSXVersionLT(10, 7))
    MMI.setCompactUnwindEncoding(getCompactUnwindEncoding(MF));
}

void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  assert(MBBI != MBB.end() && "Returning block has no instructions");
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();
  bool Is64Bit = STI.is64Bit();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break;  // These are ok
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign  = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out.  Otherwise just make sure we have some alignment - we'll
  // go with the minimum.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else
      MaxAlign = MaxAlign ? MaxAlign : 4;
  }

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
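  // For example, an epilogue that restored %rbx and %r12 ends with
  //
  //   popq %r12
  //   popq %rbx
  //   ret
  //
  // The loop below backs MBBI up over those pops (and over debug values and
  // terminators) so that the stack-pointer adjustment emitted further down is
  // inserted above them.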
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
        !PI->getDesc().isTerminator())
      break;

    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset ESP to point to the last callee-saved
  // slot before popping them off! The same applies when the stack was
  // realigned.
  if (RegInfo->needsStackRealignment(MF)) {
    // We cannot use LEA here, because the stack pointer was realigned. We need
    // to deallocate the local frame first.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII, *RegInfo);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI =
        addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                     FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else {
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII, *RegInfo);
  }

  // We're returning from function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj - MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII, *RegInfo);
    }

    // Jump to label or value in register.
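    // For example, a TCRETURNdi64 pseudo targeting @callee becomes a
    // TAILJMPd64 (i.e. "jmp callee") once the stack has been adjusted above;
    // the register and memory forms are handled analogously.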
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
                                       ? X86::TAILJMPd : X86::TAILJMPd64));
      if (JumpTarget.isGlobal())
        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                             JumpTarget.getTargetFlags());
      else {
        assert(JumpTarget.isSymbol());
        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
                              JumpTarget.getTargetFlags());
      }
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
                                       ? X86::TAILJMPm : X86::TAILJMPm64));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = prior(MBBI);
    for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
      NewMI->addOperand(MBBI->getOperand(i));

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = MBB.getLastNonDebugInstr();

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII, *RegInfo);
  }
}

int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
  const X86RegisterInfo *RI =
    static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (RI->needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      Offset += RI->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += RI->getSlotSize();

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}

bool X86FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                        const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();

  unsigned SlotSize = STI.is64Bit() ? 8 : 4;
  unsigned FPReg = TRI->getFrameRegister(MF);
  unsigned CalleeFrameSize = 0;

  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();

  // Push GPRs. It increases frame size.
  unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    if (Reg == FPReg)
      // X86RegisterInfo::emitPrologue will handle spilling of frame register.
      continue;
    CalleeFrameSize += SlotSize;
    BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  X86FI->setCalleeSavedFrameSize(CalleeFrameSize);

  // Spill XMM regs. X86 has no push/pop for XMM registers, so they are spilled
  // to stack slots instead. Note that only the Win64 ABI might spill XMMs.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
                            RC, TRI);
  }

  return true;
}

bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                        const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();

  // Reload XMMs from stack frame.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
                             RC, TRI);
  }

  // POP GPRs.
  unsigned FPReg = TRI->getFrameRegister(MF);
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    if (Reg == FPReg)
      // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
      continue;
    BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
  }
  return true;
}

void
X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                       RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  unsigned SlotSize = RegInfo->getSlotSize();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // create RETURNADDR area
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           (-1U*SlotSize)+TailCallReturnAddrDelta, true);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    (void)FrameIdx;
  }
}

static bool
HasNestArgument(const MachineFunction *MF) {
  const Function *F = MF->getFunction();
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; I++) {
    if (I->hasNestAttr())
      return true;
  }
  return false;
}

static unsigned
GetScratchRegister(bool Is64Bit, const MachineFunction &MF) {
  if (Is64Bit) {
    return X86::R11;
  } else {
    CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
    bool IsNested = HasNestArgument(&MF);

    if (CallingConvention == CallingConv::X86_FastCall) {
      if (IsNested) {
        report_fatal_error("Segmented stacks do not support fastcall with "
                           "nested functions.");
        return -1;
      } else {
        return X86::EAX;
      }
    } else {
      if (IsNested)
        return X86::EDX;
      else
        return X86::ECX;
    }
  }
}

// The stack limit in the TCB is set to this many bytes above the actual stack
// limit.
static const uint64_t kSplitStackAvailable = 256;

void
X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
  MachineBasicBlock &prologueMBB = MF.front();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  uint64_t StackSize;
  bool Is64Bit = STI.is64Bit();
  unsigned TlsReg, TlsOffset;
  DebugLoc DL;
  const X86Subtarget *ST = &MF.getTarget().getSubtarget<X86Subtarget>();

  unsigned ScratchReg = GetScratchRegister(Is64Bit, MF);
  assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
         "Scratch register is live-in");

  if (MF.getFunction()->isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!ST->isTargetLinux())
    report_fatal_error("Segmented stacks are supported only on Linux.");

  MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool IsNested = false;

  // We need to know if the function has a nest argument only in 64 bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);

  // The MOV R10, RAX needs to be in a different block, since the RET we emit in
  // allocMBB needs to be the last (terminating) instruction.
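  // The resulting control flow is roughly:
  //
  //   checkMBB: compare the (possibly frame-size-adjusted) stack pointer
  //             against the stacklet limit held in the TCB and jump to the
  //             original prologue if there is enough room;
  //   allocMBB: pass the frame size and argument size to __morestack and
  //             then resume at the original prologue.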

  for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
         e = prologueMBB.livein_end(); i != e; i++) {
    allocMBB->addLiveIn(*i);
    checkMBB->addLiveIn(*i);
  }

  if (IsNested)
    allocMBB->addLiveIn(X86::R10);

  MF.push_front(allocMBB);
  MF.push_front(checkMBB);

  // Eventually StackSize will be calculated by a link-time pass, which will
  // also decide whether checking code needs to be injected into this particular
  // prologue.
  StackSize = MFI->getStackSize();

  // Read the limit of the current stacklet from the stack_guard location.
  if (Is64Bit) {
    TlsReg = X86::FS;
    TlsOffset = 0x70;

    if (StackSize < kSplitStackAvailable)
      ScratchReg = X86::RSP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
        .addImm(0).addReg(0).addImm(-StackSize).addReg(0);

    BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
      .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
  } else {
    TlsReg = X86::GS;
    TlsOffset = 0x30;

    if (StackSize < kSplitStackAvailable)
      ScratchReg = X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
        .addImm(0).addReg(0).addImm(-StackSize).addReg(0);

    BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
      .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
  }

  // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
  // It jumps to normal execution of the function body.
  BuildMI(checkMBB, DL, TII.get(X86::JG_4)).addMBB(&prologueMBB);

  // On 32 bit we first push the arguments size and then the frame size. On 64
  // bit, we pass the stack frame size in r10 and the argument size in r11.
  if (Is64Bit) {
    // Functions with nested arguments use R10, so it needs to be saved across
    // the call to __morestack.

    if (IsNested)
      BuildMI(allocMBB, DL, TII.get(X86::MOV64rr), X86::RAX).addReg(X86::R10);

    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R10)
      .addImm(StackSize);
    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R11)
      .addImm(X86FI->getArgumentStackSize());
    MF.getRegInfo().setPhysRegUsed(X86::R10);
    MF.getRegInfo().setPhysRegUsed(X86::R11);
  } else {
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(X86FI->getArgumentStackSize());
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(StackSize);
  }

  // __morestack is in libgcc
  if (Is64Bit)
    BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack");
  else
    BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack");

  if (IsNested)
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
  else
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));

  allocMBB->addSuccessor(&prologueMBB);

  checkMBB->addSuccessor(allocMBB);
  checkMBB->addSuccessor(&prologueMBB);

#ifdef XDEBUG
  MF.verify();
#endif
}