X86FrameLowering.cpp revision dec1f996152d4292133e81527ad710fbc1280946
1//=======- X86FrameLowering.cpp - X86 Frame Information --------*- C++ -*-====// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file contains the X86 implementation of TargetFrameLowering class. 11// 12//===----------------------------------------------------------------------===// 13 14#include "X86FrameLowering.h" 15#include "X86InstrBuilder.h" 16#include "X86InstrInfo.h" 17#include "X86MachineFunctionInfo.h" 18#include "X86Subtarget.h" 19#include "X86TargetMachine.h" 20#include "llvm/Function.h" 21#include "llvm/CodeGen/MachineFrameInfo.h" 22#include "llvm/CodeGen/MachineFunction.h" 23#include "llvm/CodeGen/MachineInstrBuilder.h" 24#include "llvm/CodeGen/MachineModuleInfo.h" 25#include "llvm/CodeGen/MachineRegisterInfo.h" 26#include "llvm/MC/MCAsmInfo.h" 27#include "llvm/MC/MCSymbol.h" 28#include "llvm/Target/TargetData.h" 29#include "llvm/Target/TargetOptions.h" 30#include "llvm/Support/CommandLine.h" 31#include "llvm/ADT/SmallSet.h" 32 33using namespace llvm; 34 35// FIXME: completely move here. 36extern cl::opt<bool> ForceStackAlign; 37 38bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const { 39 return !MF.getFrameInfo()->hasVarSizedObjects(); 40} 41 42/// hasFP - Return true if the specified function should have a dedicated frame 43/// pointer register. This is true if the function has variable sized allocas 44/// or if frame pointer elimination is disabled. 45bool X86FrameLowering::hasFP(const MachineFunction &MF) const { 46 const MachineFrameInfo *MFI = MF.getFrameInfo(); 47 const MachineModuleInfo &MMI = MF.getMMI(); 48 const TargetRegisterInfo *RI = TM.getRegisterInfo(); 49 50 return (MF.getTarget().Options.DisableFramePointerElim(MF) || 51 RI->needsStackRealignment(MF) || 52 MFI->hasVarSizedObjects() || 53 MFI->isFrameAddressTaken() || 54 MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() || 55 MMI.callsUnwindInit()); 56} 57 58static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) { 59 if (is64Bit) { 60 if (isInt<8>(Imm)) 61 return X86::SUB64ri8; 62 return X86::SUB64ri32; 63 } else { 64 if (isInt<8>(Imm)) 65 return X86::SUB32ri8; 66 return X86::SUB32ri; 67 } 68} 69 70static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) { 71 if (is64Bit) { 72 if (isInt<8>(Imm)) 73 return X86::ADD64ri8; 74 return X86::ADD64ri32; 75 } else { 76 if (isInt<8>(Imm)) 77 return X86::ADD32ri8; 78 return X86::ADD32ri; 79 } 80} 81 82/// findDeadCallerSavedReg - Return a caller-saved register that isn't live 83/// when it reaches the "return" instruction. We can then pop a stack object 84/// to this register without worry about clobbering it. 
85static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB, 86 MachineBasicBlock::iterator &MBBI, 87 const TargetRegisterInfo &TRI, 88 bool Is64Bit) { 89 const MachineFunction *MF = MBB.getParent(); 90 const Function *F = MF->getFunction(); 91 if (!F || MF->getMMI().callsEHReturn()) 92 return 0; 93 94 static const unsigned CallerSavedRegs32Bit[] = { 95 X86::EAX, X86::EDX, X86::ECX, 0 96 }; 97 98 static const unsigned CallerSavedRegs64Bit[] = { 99 X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI, 100 X86::R8, X86::R9, X86::R10, X86::R11, 0 101 }; 102 103 unsigned Opc = MBBI->getOpcode(); 104 switch (Opc) { 105 default: return 0; 106 case X86::RET: 107 case X86::RETI: 108 case X86::TCRETURNdi: 109 case X86::TCRETURNri: 110 case X86::TCRETURNmi: 111 case X86::TCRETURNdi64: 112 case X86::TCRETURNri64: 113 case X86::TCRETURNmi64: 114 case X86::EH_RETURN: 115 case X86::EH_RETURN64: { 116 SmallSet<unsigned, 8> Uses; 117 for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) { 118 MachineOperand &MO = MBBI->getOperand(i); 119 if (!MO.isReg() || MO.isDef()) 120 continue; 121 unsigned Reg = MO.getReg(); 122 if (!Reg) 123 continue; 124 for (const unsigned *AsI = TRI.getOverlaps(Reg); *AsI; ++AsI) 125 Uses.insert(*AsI); 126 } 127 128 const unsigned *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit; 129 for (; *CS; ++CS) 130 if (!Uses.count(*CS)) 131 return *CS; 132 } 133 } 134 135 return 0; 136} 137 138 139/// emitSPUpdate - Emit a series of instructions to increment / decrement the 140/// stack pointer by a constant value. 141static 142void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, 143 unsigned StackPtr, int64_t NumBytes, 144 bool Is64Bit, const TargetInstrInfo &TII, 145 const TargetRegisterInfo &TRI) { 146 bool isSub = NumBytes < 0; 147 uint64_t Offset = isSub ? -NumBytes : NumBytes; 148 unsigned Opc = isSub ? 149 getSUBriOpcode(Is64Bit, Offset) : 150 getADDriOpcode(Is64Bit, Offset); 151 uint64_t Chunk = (1LL << 31) - 1; 152 DebugLoc DL = MBB.findDebugLoc(MBBI); 153 154 while (Offset) { 155 uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset; 156 if (ThisVal == (Is64Bit ? 8 : 4)) { 157 // Use push / pop instead. 158 unsigned Reg = isSub 159 ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX) 160 : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit); 161 if (Reg) { 162 Opc = isSub 163 ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r) 164 : (Is64Bit ? X86::POP64r : X86::POP32r); 165 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc)) 166 .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub)); 167 if (isSub) 168 MI->setFlag(MachineInstr::FrameSetup); 169 Offset -= ThisVal; 170 continue; 171 } 172 } 173 174 MachineInstr *MI = 175 BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr) 176 .addReg(StackPtr) 177 .addImm(ThisVal); 178 if (isSub) 179 MI->setFlag(MachineInstr::FrameSetup); 180 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. 181 Offset -= ThisVal; 182 } 183} 184 185/// mergeSPUpdatesUp - Merge two stack-manipulating instructions upper iterator. 
186static 187void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, 188 unsigned StackPtr, uint64_t *NumBytes = NULL) { 189 if (MBBI == MBB.begin()) return; 190 191 MachineBasicBlock::iterator PI = prior(MBBI); 192 unsigned Opc = PI->getOpcode(); 193 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 || 194 Opc == X86::ADD32ri || Opc == X86::ADD32ri8) && 195 PI->getOperand(0).getReg() == StackPtr) { 196 if (NumBytes) 197 *NumBytes += PI->getOperand(2).getImm(); 198 MBB.erase(PI); 199 } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 || 200 Opc == X86::SUB32ri || Opc == X86::SUB32ri8) && 201 PI->getOperand(0).getReg() == StackPtr) { 202 if (NumBytes) 203 *NumBytes -= PI->getOperand(2).getImm(); 204 MBB.erase(PI); 205 } 206} 207 208/// mergeSPUpdatesDown - Merge two stack-manipulating instructions lower iterator. 209static 210void mergeSPUpdatesDown(MachineBasicBlock &MBB, 211 MachineBasicBlock::iterator &MBBI, 212 unsigned StackPtr, uint64_t *NumBytes = NULL) { 213 // FIXME: THIS ISN'T RUN!!! 214 return; 215 216 if (MBBI == MBB.end()) return; 217 218 MachineBasicBlock::iterator NI = llvm::next(MBBI); 219 if (NI == MBB.end()) return; 220 221 unsigned Opc = NI->getOpcode(); 222 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 || 223 Opc == X86::ADD32ri || Opc == X86::ADD32ri8) && 224 NI->getOperand(0).getReg() == StackPtr) { 225 if (NumBytes) 226 *NumBytes -= NI->getOperand(2).getImm(); 227 MBB.erase(NI); 228 MBBI = NI; 229 } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 || 230 Opc == X86::SUB32ri || Opc == X86::SUB32ri8) && 231 NI->getOperand(0).getReg() == StackPtr) { 232 if (NumBytes) 233 *NumBytes += NI->getOperand(2).getImm(); 234 MBB.erase(NI); 235 MBBI = NI; 236 } 237} 238 239/// mergeSPUpdates - Checks the instruction before/after the passed 240/// instruction. If it is an ADD/SUB instruction it is deleted and the 241/// stack adjustment is returned as a positive value for ADD and a negative one for 242/// SUB. 243static int mergeSPUpdates(MachineBasicBlock &MBB, 244 MachineBasicBlock::iterator &MBBI, 245 unsigned StackPtr, 246 bool doMergeWithPrevious) { 247 if ((doMergeWithPrevious && MBBI == MBB.begin()) || 248 (!doMergeWithPrevious && MBBI == MBB.end())) 249 return 0; 250 251 MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI; 252 MachineBasicBlock::iterator NI = doMergeWithPrevious ?
0 : llvm::next(MBBI); 253 unsigned Opc = PI->getOpcode(); 254 int Offset = 0; 255 256 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 || 257 Opc == X86::ADD32ri || Opc == X86::ADD32ri8) && 258 PI->getOperand(0).getReg() == StackPtr){ 259 Offset += PI->getOperand(2).getImm(); 260 MBB.erase(PI); 261 if (!doMergeWithPrevious) MBBI = NI; 262 } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 || 263 Opc == X86::SUB32ri || Opc == X86::SUB32ri8) && 264 PI->getOperand(0).getReg() == StackPtr) { 265 Offset -= PI->getOperand(2).getImm(); 266 MBB.erase(PI); 267 if (!doMergeWithPrevious) MBBI = NI; 268 } 269 270 return Offset; 271} 272 273static bool isEAXLiveIn(MachineFunction &MF) { 274 for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(), 275 EE = MF.getRegInfo().livein_end(); II != EE; ++II) { 276 unsigned Reg = II->first; 277 278 if (Reg == X86::EAX || Reg == X86::AX || 279 Reg == X86::AH || Reg == X86::AL) 280 return true; 281 } 282 283 return false; 284} 285 286void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF, 287 MCSymbol *Label, 288 unsigned FramePtr) const { 289 MachineFrameInfo *MFI = MF.getFrameInfo(); 290 MachineModuleInfo &MMI = MF.getMMI(); 291 292 // Add callee saved registers to move list. 293 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo(); 294 if (CSI.empty()) return; 295 296 std::vector<MachineMove> &Moves = MMI.getFrameMoves(); 297 const TargetData *TD = TM.getTargetData(); 298 bool HasFP = hasFP(MF); 299 300 // Calculate the number of bytes used for storing the return address. 301 int stackGrowth = -TD->getPointerSize(); 302 303 // FIXME: This is a dirty hack. The code itself is pretty messy right now. 304 // It should be rewritten from scratch and generalized at some point. 305 306 // Determine maximum offset (minimum due to stack growth). 307 int64_t MaxOffset = 0; 308 for (std::vector<CalleeSavedInfo>::const_iterator 309 I = CSI.begin(), E = CSI.end(); I != E; ++I) 310 MaxOffset = std::min(MaxOffset, 311 MFI->getObjectOffset(I->getFrameIdx())); 312 313 // Calculate offsets. 314 int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth; 315 for (std::vector<CalleeSavedInfo>::const_iterator 316 I = CSI.begin(), E = CSI.end(); I != E; ++I) { 317 int64_t Offset = MFI->getObjectOffset(I->getFrameIdx()); 318 unsigned Reg = I->getReg(); 319 Offset = MaxOffset - Offset + saveAreaOffset; 320 321 // Don't output a new machine move if we're re-saving the frame 322 // pointer. This happens when the PrologEpilogInserter has inserted an extra 323 // "PUSH" of the frame pointer -- the "emitPrologue" method automatically 324 // generates one when frame pointers are used. If we generate a "machine 325 // move" for this extra "PUSH", the linker will lose track of the fact that 326 // the frame pointer should have the value of the first "PUSH" when it's 327 // trying to unwind. 328 // 329 // FIXME: This looks inelegant. It's possibly correct, but it's covering up 330 // another bug. I.e., one where we generate a prolog like this: 331 // 332 // pushl %ebp 333 // movl %esp, %ebp 334 // pushl %ebp 335 // pushl %esi 336 // ... 337 // 338 // The immediate re-push of EBP is unnecessary. At the least, it's an 339 // optimization bug. EBP can be used as a scratch register in certain 340 // cases, but probably not when we have a frame pointer.
341 if (HasFP && FramePtr == Reg) 342 continue; 343 344 MachineLocation CSDst(MachineLocation::VirtualFP, Offset); 345 MachineLocation CSSrc(Reg); 346 Moves.push_back(MachineMove(Label, CSDst, CSSrc)); 347 } 348} 349 350/// getCompactUnwindRegNum - Get the compact unwind number for a given 351/// register. The number corresponds to the enum lists in 352/// compact_unwind_encoding.h. 353static int getCompactUnwindRegNum(const unsigned *CURegs, unsigned Reg) { 354 for (int Idx = 1; *CURegs; ++CURegs, ++Idx) 355 if (*CURegs == Reg) 356 return Idx; 357 358 return -1; 359} 360 361// Number of registers that can be saved in a compact unwind encoding. 362#define CU_NUM_SAVED_REGS 6 363 364/// encodeCompactUnwindRegistersWithoutFrame - Create the permutation encoding 365/// used with frameless stacks. It is passed the number of registers to be saved 366/// and an array of the registers saved. 367static uint32_t 368encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS], 369 unsigned RegCount, bool Is64Bit) { 370 // The saved registers are numbered from 1 to 6. In order to encode the order 371 // in which they were saved, we re-number them according to their place in the 372 // register order. The re-numbering is relative to the last re-numbered 373 // register. E.g., if we have registers {6, 2, 4, 5} saved in that order: 374 // 375 // Orig Re-Num 376 // ---- ------ 377 // 6 6 378 // 2 2 379 // 4 3 380 // 5 3 381 // 382 static const unsigned CU32BitRegs[] = { 383 X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0 384 }; 385 static const unsigned CU64BitRegs[] = { 386 X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0 387 }; 388 const unsigned *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs); 389 390 for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) { 391 int CUReg = getCompactUnwindRegNum(CURegs, SavedRegs[i]); 392 if (CUReg == -1) return ~0U; 393 SavedRegs[i] = CUReg; 394 } 395 396 // Reverse the list. 397 std::swap(SavedRegs[0], SavedRegs[5]); 398 std::swap(SavedRegs[1], SavedRegs[4]); 399 std::swap(SavedRegs[2], SavedRegs[3]); 400 401 uint32_t RenumRegs[CU_NUM_SAVED_REGS]; 402 for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i) { 403 unsigned Countless = 0; 404 for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j) 405 if (SavedRegs[j] < SavedRegs[i]) 406 ++Countless; 407 408 RenumRegs[i] = SavedRegs[i] - Countless - 1; 409 } 410 411 // Take the renumbered values and encode them into a 10-bit number. 412 uint32_t permutationEncoding = 0; 413 switch (RegCount) { 414 case 6: 415 permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1] 416 + 6 * RenumRegs[2] + 2 * RenumRegs[3] 417 + RenumRegs[4]; 418 break; 419 case 5: 420 permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2] 421 + 6 * RenumRegs[3] + 2 * RenumRegs[4] 422 + RenumRegs[5]; 423 break; 424 case 4: 425 permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3] 426 + 3 * RenumRegs[4] + RenumRegs[5]; 427 break; 428 case 3: 429 permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4] 430 + RenumRegs[5]; 431 break; 432 case 2: 433 permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5]; 434 break; 435 case 1: 436 permutationEncoding |= RenumRegs[5]; 437 break; 438 } 439 440 assert((permutationEncoding & 0x3FF) == permutationEncoding && 441 "Invalid compact register encoding!"); 442 return permutationEncoding; 443} 444 445/// encodeCompactUnwindRegistersWithFrame - Return the registers encoded for a 446/// compact encoding with a frame pointer. 
447static uint32_t 448encodeCompactUnwindRegistersWithFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS], 449 bool Is64Bit) { 450 static const unsigned CU32BitRegs[] = { 451 X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0 452 }; 453 static const unsigned CU64BitRegs[] = { 454 X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0 455 }; 456 const unsigned *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs); 457 458 // Encode the registers in the order they were saved, 3 bits per register. The 459 // registers are numbered from 1 to 6. 460 uint32_t RegEnc = 0; 461 for (int I = 0; I != 6; ++I) { 462 unsigned Reg = SavedRegs[I]; 463 if (Reg == 0) break; 464 int CURegNum = getCompactUnwindRegNum(CURegs, Reg); 465 if (CURegNum == -1) 466 return ~0U; 467 468 // Encode the 3-bit register number in order, skipping over 3 bits for each 469 // register. 470 RegEnc |= (CURegNum & 0x7) << ((5 - I) * 3); 471 } 472 473 assert((RegEnc & 0x3FFFF) == RegEnc && "Invalid compact register encoding!"); 474 return RegEnc; 475} 476 477uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const { 478 const X86RegisterInfo *RegInfo = TM.getRegisterInfo(); 479 unsigned FramePtr = RegInfo->getFrameRegister(MF); 480 unsigned StackPtr = RegInfo->getStackRegister(); 481 482 bool Is64Bit = STI.is64Bit(); 483 bool HasFP = hasFP(MF); 484 485 unsigned SavedRegs[CU_NUM_SAVED_REGS] = { 0, 0, 0, 0, 0, 0 }; 486 unsigned SavedRegIdx = 0; 487 488 unsigned OffsetSize = (Is64Bit ? 8 : 4); 489 490 unsigned PushInstr = (Is64Bit ? X86::PUSH64r : X86::PUSH32r); 491 unsigned PushInstrSize = 1; 492 unsigned MoveInstr = (Is64Bit ? X86::MOV64rr : X86::MOV32rr); 493 unsigned MoveInstrSize = (Is64Bit ? 3 : 2); 494 unsigned SubtractInstrIdx = (Is64Bit ? 3 : 2); 495 496 unsigned StackDivide = (Is64Bit ? 8 : 4); 497 498 unsigned InstrOffset = 0; 499 unsigned StackAdjust = 0; 500 unsigned StackSize = 0; 501 502 MachineBasicBlock &MBB = MF.front(); // Prologue is in entry BB. 503 bool ExpectEnd = false; 504 for (MachineBasicBlock::iterator 505 MBBI = MBB.begin(), MBBE = MBB.end(); MBBI != MBBE; ++MBBI) { 506 MachineInstr &MI = *MBBI; 507 unsigned Opc = MI.getOpcode(); 508 if (Opc == X86::PROLOG_LABEL) continue; 509 if (!MI.getFlag(MachineInstr::FrameSetup)) break; 510 511 // We don't expect any more prolog instructions. 512 if (ExpectEnd) return 0; 513 514 if (Opc == PushInstr) { 515 // If there are too many saved registers, we cannot use compact encoding. 516 if (SavedRegIdx >= CU_NUM_SAVED_REGS) return 0; 517 518 SavedRegs[SavedRegIdx++] = MI.getOperand(0).getReg(); 519 StackAdjust += OffsetSize; 520 InstrOffset += PushInstrSize; 521 } else if (Opc == MoveInstr) { 522 unsigned SrcReg = MI.getOperand(1).getReg(); 523 unsigned DstReg = MI.getOperand(0).getReg(); 524 525 if (DstReg != FramePtr || SrcReg != StackPtr) 526 return 0; 527 528 StackAdjust = 0; 529 memset(SavedRegs, 0, sizeof(SavedRegs)); 530 SavedRegIdx = 0; 531 InstrOffset += MoveInstrSize; 532 } else if (Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 || 533 Opc == X86::SUB32ri || Opc == X86::SUB32ri8) { 534 if (StackSize) 535 // We already have a stack size. 536 return 0; 537 538 if (!MI.getOperand(0).isReg() || 539 MI.getOperand(0).getReg() != MI.getOperand(1).getReg() || 540 MI.getOperand(0).getReg() != StackPtr || !MI.getOperand(2).isImm()) 541 // We need this to be a stack adjustment pointer.
Something like: 542 // 543 // %RSP<def> = SUB64ri8 %RSP, 48 544 return 0; 545 546 StackSize = MI.getOperand(2).getImm() / StackDivide; 547 SubtractInstrIdx += InstrOffset; 548 ExpectEnd = true; 549 } 550 } 551 552 // Encode that we are using EBP/RBP as the frame pointer. 553 uint32_t CompactUnwindEncoding = 0; 554 StackAdjust /= StackDivide; 555 if (HasFP) { 556 if ((StackAdjust & 0xFF) != StackAdjust) 557 // Offset was too big for compact encoding. 558 return 0; 559 560 // Get the encoding of the saved registers when we have a frame pointer. 561 uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame(SavedRegs, Is64Bit); 562 if (RegEnc == ~0U) return 0; 563 564 CompactUnwindEncoding |= 0x01000000; 565 CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16; 566 CompactUnwindEncoding |= RegEnc & 0x7FFF; 567 } else { 568 ++StackAdjust; 569 uint32_t TotalStackSize = StackAdjust + StackSize; 570 if ((TotalStackSize & 0xFF) == TotalStackSize) { 571 // Frameless stack with a small stack size. 572 CompactUnwindEncoding |= 0x02000000; 573 574 // Encode the stack size. 575 CompactUnwindEncoding |= (TotalStackSize & 0xFF) << 16; 576 } else { 577 if ((StackAdjust & 0x7) != StackAdjust) 578 // The extra stack adjustments are too big for us to handle. 579 return 0; 580 581 // Frameless stack with an offset too large for us to encode compactly. 582 CompactUnwindEncoding |= 0x03000000; 583 584 // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP' 585 // instruction. 586 CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16; 587 588 // Encode any extra stack adjustments (done via push instructions). 589 CompactUnwindEncoding |= (StackAdjust & 0x7) << 13; 590 } 591 592 // Encode the number of registers saved. 593 CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10; 594 595 // Get the encoding of the saved registers when we don't have a frame 596 // pointer. 597 uint32_t RegEnc = 598 encodeCompactUnwindRegistersWithoutFrame(SavedRegs, SavedRegIdx, 599 Is64Bit); 600 if (RegEnc == ~0U) return 0; 601 602 // Encode the register encoding. 603 CompactUnwindEncoding |= RegEnc & 0x3FF; 604 } 605 606 return CompactUnwindEncoding; 607} 608 609/// emitPrologue - Push callee-saved registers onto the stack, which 610/// automatically adjusts the stack pointer. Adjust the stack pointer to allocate 611/// space for local variables. Also emit labels used by the exception handler to 612/// generate the exception handling frames. 613void X86FrameLowering::emitPrologue(MachineFunction &MF) const { 614 MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB. 615 MachineBasicBlock::iterator MBBI = MBB.begin(); 616 MachineFrameInfo *MFI = MF.getFrameInfo(); 617 const Function *Fn = MF.getFunction(); 618 const X86RegisterInfo *RegInfo = TM.getRegisterInfo(); 619 const X86InstrInfo &TII = *TM.getInstrInfo(); 620 MachineModuleInfo &MMI = MF.getMMI(); 621 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 622 bool needsFrameMoves = MMI.hasDebugInfo() || 623 Fn->needsUnwindTableEntry(); 624 uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment. 625 uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
626 bool HasFP = hasFP(MF); 627 bool Is64Bit = STI.is64Bit(); 628 bool IsWin64 = STI.isTargetWin64(); 629 unsigned StackAlign = getStackAlignment(); 630 unsigned SlotSize = RegInfo->getSlotSize(); 631 unsigned FramePtr = RegInfo->getFrameRegister(MF); 632 unsigned StackPtr = RegInfo->getStackRegister(); 633 DebugLoc DL; 634 635 // If we're forcing a stack realignment we can't rely on just the frame 636 // info, we need to know the ABI stack alignment as well in case we 637 // have a call out. Otherwise just make sure we have some alignment - we'll 638 // go with the minimum SlotSize. 639 if (ForceStackAlign) { 640 if (MFI->hasCalls()) 641 MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign; 642 else if (MaxAlign < SlotSize) 643 MaxAlign = SlotSize; 644 } 645 646 // Add RETADDR move area to callee saved frame size. 647 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta(); 648 if (TailCallReturnAddrDelta < 0) 649 X86FI->setCalleeSavedFrameSize( 650 X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta); 651 652 // If this is x86-64 and the Red Zone is not disabled, if we are a leaf 653 // function, and use up to 128 bytes of stack space, don't have a frame 654 // pointer, calls, or dynamic alloca then we do not need to adjust the 655 // stack pointer (we fit in the Red Zone). 656 if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) && 657 !RegInfo->needsStackRealignment(MF) && 658 !MFI->hasVarSizedObjects() && // No dynamic alloca. 659 !MFI->adjustsStack() && // No calls. 660 !IsWin64 && // Win64 has no Red Zone 661 !MF.getTarget().Options.EnableSegmentedStacks) { // Regular stack 662 uint64_t MinSize = X86FI->getCalleeSavedFrameSize(); 663 if (HasFP) MinSize += SlotSize; 664 StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0); 665 MFI->setStackSize(StackSize); 666 } 667 668 // Insert stack pointer adjustment for later moving of return addr. Only 669 // applies to tail call optimized functions where the callee argument stack 670 // size is bigger than the callers. 671 if (TailCallReturnAddrDelta < 0) { 672 MachineInstr *MI = 673 BuildMI(MBB, MBBI, DL, 674 TII.get(getSUBriOpcode(Is64Bit, -TailCallReturnAddrDelta)), 675 StackPtr) 676 .addReg(StackPtr) 677 .addImm(-TailCallReturnAddrDelta) 678 .setMIFlag(MachineInstr::FrameSetup); 679 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. 680 } 681 682 // Mapping for machine moves: 683 // 684 // DST: VirtualFP AND 685 // SRC: VirtualFP => DW_CFA_def_cfa_offset 686 // ELSE => DW_CFA_def_cfa 687 // 688 // SRC: VirtualFP AND 689 // DST: Register => DW_CFA_def_cfa_register 690 // 691 // ELSE 692 // OFFSET < 0 => DW_CFA_offset_extended_sf 693 // REG < 64 => DW_CFA_offset + Reg 694 // ELSE => DW_CFA_offset_extended 695 696 std::vector<MachineMove> &Moves = MMI.getFrameMoves(); 697 const TargetData *TD = MF.getTarget().getTargetData(); 698 uint64_t NumBytes = 0; 699 int stackGrowth = -TD->getPointerSize(); 700 701 if (HasFP) { 702 // Calculate required stack adjustment. 703 uint64_t FrameSize = StackSize - SlotSize; 704 if (RegInfo->needsStackRealignment(MF)) 705 FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign; 706 707 NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize(); 708 709 // Get the offset of the stack slot for the EBP register, which is 710 // guaranteed to be the last slot by processFunctionBeforeFrameFinalized. 711 // Update the frame offset adjustment. 712 MFI->setOffsetAdjustment(-NumBytes); 713 714 // Save EBP/RBP into the appropriate stack slot. 
715 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r)) 716 .addReg(FramePtr, RegState::Kill) 717 .setMIFlag(MachineInstr::FrameSetup); 718 719 if (needsFrameMoves) { 720 // Mark the place where EBP/RBP was saved. 721 MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol(); 722 BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)) 723 .addSym(FrameLabel); 724 725 // Define the current CFA rule to use the provided offset. 726 if (StackSize) { 727 MachineLocation SPDst(MachineLocation::VirtualFP); 728 MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth); 729 Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc)); 730 } else { 731 MachineLocation SPDst(StackPtr); 732 MachineLocation SPSrc(StackPtr, stackGrowth); 733 Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc)); 734 } 735 736 // Change the rule for the FramePtr to be an "offset" rule. 737 MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth); 738 MachineLocation FPSrc(FramePtr); 739 Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc)); 740 } 741 742 // Update EBP with the new base value. 743 BuildMI(MBB, MBBI, DL, 744 TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr) 745 .addReg(StackPtr) 746 .setMIFlag(MachineInstr::FrameSetup); 747 748 if (needsFrameMoves) { 749 // Mark the effective beginning of when the frame pointer becomes valid. 750 MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol(); 751 BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)) 752 .addSym(FrameLabel); 753 754 // Define the current CFA to use the EBP/RBP register. 755 MachineLocation FPDst(FramePtr); 756 MachineLocation FPSrc(MachineLocation::VirtualFP); 757 Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc)); 758 } 759 760 // Mark the FramePtr as live-in in every block except the entry. 761 for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end(); 762 I != E; ++I) 763 I->addLiveIn(FramePtr); 764 765 // Realign stack 766 if (RegInfo->needsStackRealignment(MF)) { 767 MachineInstr *MI = 768 BuildMI(MBB, MBBI, DL, 769 TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri), StackPtr) 770 .addReg(StackPtr) 771 .addImm(-MaxAlign) 772 .setMIFlag(MachineInstr::FrameSetup); 773 774 // The EFLAGS implicit def is dead. 775 MI->getOperand(3).setIsDead(); 776 } 777 } else { 778 NumBytes = StackSize - X86FI->getCalleeSavedFrameSize(); 779 } 780 781 // Skip the callee-saved push instructions. 782 bool PushedRegs = false; 783 int StackOffset = 2 * stackGrowth; 784 785 while (MBBI != MBB.end() && 786 (MBBI->getOpcode() == X86::PUSH32r || 787 MBBI->getOpcode() == X86::PUSH64r)) { 788 PushedRegs = true; 789 MBBI->setFlag(MachineInstr::FrameSetup); 790 ++MBBI; 791 792 if (!HasFP && needsFrameMoves) { 793 // Mark callee-saved push instruction. 794 MCSymbol *Label = MMI.getContext().CreateTempSymbol(); 795 BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label); 796 797 // Define the current CFA rule to use the provided offset. 798 unsigned Ptr = StackSize ? MachineLocation::VirtualFP : StackPtr; 799 MachineLocation SPDst(Ptr); 800 MachineLocation SPSrc(Ptr, StackOffset); 801 Moves.push_back(MachineMove(Label, SPDst, SPSrc)); 802 StackOffset += stackGrowth; 803 } 804 } 805 806 DL = MBB.findDebugLoc(MBBI); 807 808 // If there is a SUB32ri of ESP immediately before this instruction, merge 809 // the two. This can be the case when tail call elimination is enabled and 810 // the callee has more arguments than the caller.
811 NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true); 812 813 // If there is an ADD32ri or SUB32ri of ESP immediately after this 814 // instruction, merge the two instructions. 815 mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes); 816 817 // Adjust stack pointer: ESP -= numbytes. 818 819 // Windows and cygwin/mingw require a prologue helper routine when allocating 820 // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw 821 // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the 822 // stack and adjust the stack pointer in one go. The 64-bit version of 823 // __chkstk is only responsible for probing the stack. The 64-bit prologue is 824 // responsible for adjusting the stack pointer. Touching the stack at 4K 825 // increments is necessary to ensure that the guard pages used by the OS 826 // virtual memory manager are allocated in correct sequence. 827 if (NumBytes >= 4096 && STI.isTargetCOFF() && !STI.isTargetEnvMacho()) { 828 const char *StackProbeSymbol; 829 bool isSPUpdateNeeded = false; 830 831 if (Is64Bit) { 832 if (STI.isTargetCygMing()) 833 StackProbeSymbol = "___chkstk"; 834 else { 835 StackProbeSymbol = "__chkstk"; 836 isSPUpdateNeeded = true; 837 } 838 } else if (STI.isTargetCygMing()) 839 StackProbeSymbol = "_alloca"; 840 else 841 StackProbeSymbol = "_chkstk"; 842 843 // Check whether EAX is livein for this function. 844 bool isEAXAlive = isEAXLiveIn(MF); 845 846 if (isEAXAlive) { 847 // Sanity check that EAX is not livein for this function. 848 // It should not be, so throw an assert. 849 assert(!Is64Bit && "EAX is livein in x64 case!"); 850 851 // Save EAX 852 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r)) 853 .addReg(X86::EAX, RegState::Kill) 854 .setMIFlag(MachineInstr::FrameSetup); 855 } 856 857 if (Is64Bit) { 858 // Handle the 64-bit Windows ABI case where we need to call __chkstk. 859 // Function prologue is responsible for adjusting the stack pointer. 860 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX) 861 .addImm(NumBytes) 862 .setMIFlag(MachineInstr::FrameSetup); 863 } else { 864 // Allocate NumBytes-4 bytes on stack in case of isEAXAlive. 865 // We'll also use 4 already allocated bytes for EAX. 866 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX) 867 .addImm(isEAXAlive ? NumBytes - 4 : NumBytes) 868 .setMIFlag(MachineInstr::FrameSetup); 869 } 870 871 BuildMI(MBB, MBBI, DL, 872 TII.get(Is64Bit ? X86::W64ALLOCA : X86::CALLpcrel32)) 873 .addExternalSymbol(StackProbeSymbol) 874 .addReg(StackPtr, RegState::Define | RegState::Implicit) 875 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit) 876 .setMIFlag(MachineInstr::FrameSetup); 877 878 // MSVC x64's __chkstk needs to adjust %rsp. 879 // FIXME: %rax preserves the offset and should be available. 880 if (isSPUpdateNeeded) 881 emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, 882 TII, *RegInfo); 883 884 if (isEAXAlive) { 885 // Restore EAX 886 MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), 887 X86::EAX), 888 StackPtr, false, NumBytes - 4); 889 MI->setFlag(MachineInstr::FrameSetup); 890 MBB.insert(MBBI, MI); 891 } 892 } else if (NumBytes) 893 emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, 894 TII, *RegInfo); 895 896 if (( (!HasFP && NumBytes) || PushedRegs) && needsFrameMoves) { 897 // Mark end of stack pointer adjustment. 
898 MCSymbol *Label = MMI.getContext().CreateTempSymbol(); 899 BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)) 900 .addSym(Label); 901 902 if (!HasFP && NumBytes) { 903 // Define the current CFA rule to use the provided offset. 904 if (StackSize) { 905 MachineLocation SPDst(MachineLocation::VirtualFP); 906 MachineLocation SPSrc(MachineLocation::VirtualFP, 907 -StackSize + stackGrowth); 908 Moves.push_back(MachineMove(Label, SPDst, SPSrc)); 909 } else { 910 MachineLocation SPDst(StackPtr); 911 MachineLocation SPSrc(StackPtr, stackGrowth); 912 Moves.push_back(MachineMove(Label, SPDst, SPSrc)); 913 } 914 } 915 916 // Emit DWARF info specifying the offsets of the callee-saved registers. 917 if (PushedRegs) 918 emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr); 919 } 920 921 // Darwin 10.7 and greater has support for compact unwind encoding. 922 if (STI.getTargetTriple().isMacOSX() && 923 !STI.getTargetTriple().isMacOSXVersionLT(10, 7)) 924 MMI.setCompactUnwindEncoding(getCompactUnwindEncoding(MF)); 925} 926 927void X86FrameLowering::emitEpilogue(MachineFunction &MF, 928 MachineBasicBlock &MBB) const { 929 const MachineFrameInfo *MFI = MF.getFrameInfo(); 930 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 931 const X86RegisterInfo *RegInfo = TM.getRegisterInfo(); 932 const X86InstrInfo &TII = *TM.getInstrInfo(); 933 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); 934 assert(MBBI != MBB.end() && "Returning block has no instructions"); 935 unsigned RetOpcode = MBBI->getOpcode(); 936 DebugLoc DL = MBBI->getDebugLoc(); 937 bool Is64Bit = STI.is64Bit(); 938 unsigned StackAlign = getStackAlignment(); 939 unsigned SlotSize = RegInfo->getSlotSize(); 940 unsigned FramePtr = RegInfo->getFrameRegister(MF); 941 unsigned StackPtr = RegInfo->getStackRegister(); 942 943 switch (RetOpcode) { 944 default: 945 llvm_unreachable("Can only insert epilog into returning blocks"); 946 case X86::RET: 947 case X86::RETI: 948 case X86::TCRETURNdi: 949 case X86::TCRETURNri: 950 case X86::TCRETURNmi: 951 case X86::TCRETURNdi64: 952 case X86::TCRETURNri64: 953 case X86::TCRETURNmi64: 954 case X86::EH_RETURN: 955 case X86::EH_RETURN64: 956 break; // These are ok 957 } 958 959 // Get the number of bytes to allocate from the FrameInfo. 960 uint64_t StackSize = MFI->getStackSize(); 961 uint64_t MaxAlign = MFI->getMaxAlignment(); 962 unsigned CSSize = X86FI->getCalleeSavedFrameSize(); 963 uint64_t NumBytes = 0; 964 965 // If we're forcing a stack realignment we can't rely on just the frame 966 // info, we need to know the ABI stack alignment as well in case we 967 // have a call out. Otherwise just make sure we have some alignment - we'll 968 // go with the minimum. 969 if (ForceStackAlign) { 970 if (MFI->hasCalls()) 971 MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign; 972 else 973 MaxAlign = MaxAlign ? MaxAlign : 4; 974 } 975 976 if (hasFP(MF)) { 977 // Calculate required stack adjustment. 978 uint64_t FrameSize = StackSize - SlotSize; 979 if (RegInfo->needsStackRealignment(MF)) 980 FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign; 981 982 NumBytes = FrameSize - CSSize; 983 984 // Pop EBP. 985 BuildMI(MBB, MBBI, DL, 986 TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr); 987 } else { 988 NumBytes = StackSize - CSSize; 989 } 990 991 // Skip the callee-saved pop instructions. 
992 MachineBasicBlock::iterator LastCSPop = MBBI; 993 while (MBBI != MBB.begin()) { 994 MachineBasicBlock::iterator PI = prior(MBBI); 995 unsigned Opc = PI->getOpcode(); 996 997 if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE && 998 !PI->isTerminator()) 999 break; 1000 1001 --MBBI; 1002 } 1003 1004 DL = MBBI->getDebugLoc(); 1005 1006 // If there is an ADD32ri or SUB32ri of ESP immediately before this 1007 // instruction, merge the two instructions. 1008 if (NumBytes || MFI->hasVarSizedObjects()) 1009 mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes); 1010 1011 // If dynamic alloca is used, then reset ESP to point to the last callee-saved 1012 // slot before popping them off. The same applies when the stack was 1013 // realigned. 1014 if (RegInfo->needsStackRealignment(MF)) { 1015 // We cannot use LEA here, because the stack pointer was realigned. We need 1016 // to deallocate the local frame instead. 1017 if (CSSize) { 1018 emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII, *RegInfo); 1019 MBBI = prior(LastCSPop); 1020 } 1021 1022 BuildMI(MBB, MBBI, DL, 1023 TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), 1024 StackPtr).addReg(FramePtr); 1025 } else if (MFI->hasVarSizedObjects()) { 1026 if (CSSize) { 1027 unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r; 1028 MachineInstr *MI = 1029 addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr), 1030 FramePtr, false, -CSSize); 1031 MBB.insert(MBBI, MI); 1032 } else { 1033 BuildMI(MBB, MBBI, DL, 1034 TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr) 1035 .addReg(FramePtr); 1036 } 1037 } else if (NumBytes) { 1038 // Adjust stack pointer back: ESP += numbytes. 1039 emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII, *RegInfo); 1040 } 1041 1042 // We're returning from the function via eh_return. 1043 if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) { 1044 MBBI = MBB.getLastNonDebugInstr(); 1045 MachineOperand &DestAddr = MBBI->getOperand(0); 1046 assert(DestAddr.isReg() && "Offset should be in register!"); 1047 BuildMI(MBB, MBBI, DL, 1048 TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), 1049 StackPtr).addReg(DestAddr.getReg()); 1050 } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi || 1051 RetOpcode == X86::TCRETURNmi || 1052 RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 || 1053 RetOpcode == X86::TCRETURNmi64) { 1054 bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64; 1055 // Tail call return: adjust the stack pointer and jump to callee. 1056 MBBI = MBB.getLastNonDebugInstr(); 1057 MachineOperand &JumpTarget = MBBI->getOperand(0); 1058 MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1); 1059 assert(StackAdjust.isImm() && "Expecting immediate value."); 1060 1061 // Adjust stack pointer. 1062 int StackAdj = StackAdjust.getImm(); 1063 int MaxTCDelta = X86FI->getTCReturnAddrDelta(); 1064 int Offset = 0; 1065 assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive"); 1066 1067 // Incorporate the retaddr area. 1068 Offset = StackAdj-MaxTCDelta; 1069 assert(Offset >= 0 && "Offset should never be negative"); 1070 1071 if (Offset) { 1072 // Check for possible merge with preceding ADD instruction. 1073 Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true); 1074 emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII, *RegInfo); 1075 } 1076 1077 // Jump to label or value in register.
1078 if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) { 1079 MachineInstrBuilder MIB = 1080 BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi) 1081 ? X86::TAILJMPd : X86::TAILJMPd64)); 1082 if (JumpTarget.isGlobal()) 1083 MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(), 1084 JumpTarget.getTargetFlags()); 1085 else { 1086 assert(JumpTarget.isSymbol()); 1087 MIB.addExternalSymbol(JumpTarget.getSymbolName(), 1088 JumpTarget.getTargetFlags()); 1089 } 1090 } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) { 1091 MachineInstrBuilder MIB = 1092 BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi) 1093 ? X86::TAILJMPm : X86::TAILJMPm64)); 1094 for (unsigned i = 0; i != 5; ++i) 1095 MIB.addOperand(MBBI->getOperand(i)); 1096 } else if (RetOpcode == X86::TCRETURNri64) { 1097 BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)). 1098 addReg(JumpTarget.getReg(), RegState::Kill); 1099 } else { 1100 BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)). 1101 addReg(JumpTarget.getReg(), RegState::Kill); 1102 } 1103 1104 MachineInstr *NewMI = prior(MBBI); 1105 for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i) 1106 NewMI->addOperand(MBBI->getOperand(i)); 1107 1108 // Delete the pseudo instruction TCRETURN. 1109 MBB.erase(MBBI); 1110 } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) && 1111 (X86FI->getTCReturnAddrDelta() < 0)) { 1112 // Add the return addr area delta back since we are not tail calling. 1113 int delta = -1*X86FI->getTCReturnAddrDelta(); 1114 MBBI = MBB.getLastNonDebugInstr(); 1115 1116 // Check for possible merge with preceding ADD instruction. 1117 delta += mergeSPUpdates(MBB, MBBI, StackPtr, true); 1118 emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII, *RegInfo); 1119 } 1120} 1121 1122int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI) const { 1123 const X86RegisterInfo *RI = 1124 static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo()); 1125 const MachineFrameInfo *MFI = MF.getFrameInfo(); 1126 int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea(); 1127 uint64_t StackSize = MFI->getStackSize(); 1128 1129 if (RI->needsStackRealignment(MF)) { 1130 if (FI < 0) { 1131 // Skip the saved EBP. 1132 Offset += RI->getSlotSize(); 1133 } else { 1134 assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0); 1135 return Offset + StackSize; 1136 } 1137 // FIXME: Support tail calls 1138 } else { 1139 if (!hasFP(MF)) 1140 return Offset + StackSize; 1141 1142 // Skip the saved EBP. 1143 Offset += RI->getSlotSize(); 1144 1145 // Skip the RETADDR move area 1146 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 1147 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta(); 1148 if (TailCallReturnAddrDelta < 0) 1149 Offset -= TailCallReturnAddrDelta; 1150 } 1151 1152 return Offset; 1153} 1154 1155bool X86FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB, 1156 MachineBasicBlock::iterator MI, 1157 const std::vector<CalleeSavedInfo> &CSI, 1158 const TargetRegisterInfo *TRI) const { 1159 if (CSI.empty()) 1160 return false; 1161 1162 DebugLoc DL = MBB.findDebugLoc(MI); 1163 1164 MachineFunction &MF = *MBB.getParent(); 1165 1166 unsigned SlotSize = STI.is64Bit() ? 
8 : 4; 1167 unsigned FPReg = TRI->getFrameRegister(MF); 1168 unsigned CalleeFrameSize = 0; 1169 1170 const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo(); 1171 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 1172 1173 // Push GPRs. It increases frame size. 1174 unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r; 1175 for (unsigned i = CSI.size(); i != 0; --i) { 1176 unsigned Reg = CSI[i-1].getReg(); 1177 if (!X86::GR64RegClass.contains(Reg) && 1178 !X86::GR32RegClass.contains(Reg)) 1179 continue; 1180 // Add the callee-saved register as live-in. It's killed at the spill. 1181 MBB.addLiveIn(Reg); 1182 if (Reg == FPReg) 1183 // X86RegisterInfo::emitPrologue will handle spilling of frame register. 1184 continue; 1185 CalleeFrameSize += SlotSize; 1186 BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill) 1187 .setMIFlag(MachineInstr::FrameSetup); 1188 } 1189 1190 X86FI->setCalleeSavedFrameSize(CalleeFrameSize); 1191 1192 // Make XMM regs spilled. X86 does not have ability of push/pop XMM. 1193 // It can be done by spilling XMMs to stack frame. 1194 // Note that only Win64 ABI might spill XMMs. 1195 for (unsigned i = CSI.size(); i != 0; --i) { 1196 unsigned Reg = CSI[i-1].getReg(); 1197 if (X86::GR64RegClass.contains(Reg) || 1198 X86::GR32RegClass.contains(Reg)) 1199 continue; 1200 // Add the callee-saved register as live-in. It's killed at the spill. 1201 MBB.addLiveIn(Reg); 1202 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); 1203 TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(), 1204 RC, TRI); 1205 } 1206 1207 return true; 1208} 1209 1210bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB, 1211 MachineBasicBlock::iterator MI, 1212 const std::vector<CalleeSavedInfo> &CSI, 1213 const TargetRegisterInfo *TRI) const { 1214 if (CSI.empty()) 1215 return false; 1216 1217 DebugLoc DL = MBB.findDebugLoc(MI); 1218 1219 MachineFunction &MF = *MBB.getParent(); 1220 const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo(); 1221 1222 // Reload XMMs from stack frame. 1223 for (unsigned i = 0, e = CSI.size(); i != e; ++i) { 1224 unsigned Reg = CSI[i].getReg(); 1225 if (X86::GR64RegClass.contains(Reg) || 1226 X86::GR32RegClass.contains(Reg)) 1227 continue; 1228 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); 1229 TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), 1230 RC, TRI); 1231 } 1232 1233 // POP GPRs. 1234 unsigned FPReg = TRI->getFrameRegister(MF); 1235 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r; 1236 for (unsigned i = 0, e = CSI.size(); i != e; ++i) { 1237 unsigned Reg = CSI[i].getReg(); 1238 if (!X86::GR64RegClass.contains(Reg) && 1239 !X86::GR32RegClass.contains(Reg)) 1240 continue; 1241 if (Reg == FPReg) 1242 // X86RegisterInfo::emitEpilogue will handle restoring of frame register. 1243 continue; 1244 BuildMI(MBB, MI, DL, TII.get(Opc), Reg); 1245 } 1246 return true; 1247} 1248 1249void 1250X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, 1251 RegScavenger *RS) const { 1252 MachineFrameInfo *MFI = MF.getFrameInfo(); 1253 const X86RegisterInfo *RegInfo = TM.getRegisterInfo(); 1254 unsigned SlotSize = RegInfo->getSlotSize(); 1255 1256 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 1257 int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta(); 1258 1259 if (TailCallReturnAddrDelta < 0) { 1260 // create RETURNADDR area 1261 // arg 1262 // arg 1263 // RETADDR 1264 // { ... 
1265 // RETADDR area 1266 // ... 1267 // } 1268 // [EBP] 1269 MFI->CreateFixedObject(-TailCallReturnAddrDelta, 1270 (-1U*SlotSize)+TailCallReturnAddrDelta, true); 1271 } 1272 1273 if (hasFP(MF)) { 1274 assert((TailCallReturnAddrDelta <= 0) && 1275 "The Delta should always be zero or negative"); 1276 const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering(); 1277 1278 // Create a frame entry for the EBP register that must be saved. 1279 int FrameIdx = MFI->CreateFixedObject(SlotSize, 1280 -(int)SlotSize + 1281 TFI.getOffsetOfLocalArea() + 1282 TailCallReturnAddrDelta, 1283 true); 1284 assert(FrameIdx == MFI->getObjectIndexBegin() && 1285 "Slot for EBP register must be last in order to be found!"); 1286 (void)FrameIdx; 1287 } 1288} 1289 1290static bool 1291HasNestArgument(const MachineFunction *MF) { 1292 const Function *F = MF->getFunction(); 1293 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 1294 I != E; I++) { 1295 if (I->hasNestAttr()) 1296 return true; 1297 } 1298 return false; 1299} 1300 1301static unsigned 1302GetScratchRegister(bool Is64Bit, const MachineFunction &MF) { 1303 if (Is64Bit) { 1304 return X86::R11; 1305 } else { 1306 CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv(); 1307 bool IsNested = HasNestArgument(&MF); 1308 1309 if (CallingConvention == CallingConv::X86_FastCall) { 1310 if (IsNested) { 1311 report_fatal_error("Segmented stacks does not support fastcall with " 1312 "nested function."); 1313 return -1; 1314 } else { 1315 return X86::EAX; 1316 } 1317 } else { 1318 if (IsNested) 1319 return X86::EDX; 1320 else 1321 return X86::ECX; 1322 } 1323 } 1324} 1325 1326// The stack limit in the TCB is set to this many bytes above the actual stack 1327// limit. 1328static const uint64_t kSplitStackAvailable = 256; 1329 1330void 1331X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const { 1332 MachineBasicBlock &prologueMBB = MF.front(); 1333 MachineFrameInfo *MFI = MF.getFrameInfo(); 1334 const X86InstrInfo &TII = *TM.getInstrInfo(); 1335 uint64_t StackSize; 1336 bool Is64Bit = STI.is64Bit(); 1337 unsigned TlsReg, TlsOffset; 1338 DebugLoc DL; 1339 const X86Subtarget *ST = &MF.getTarget().getSubtarget<X86Subtarget>(); 1340 1341 unsigned ScratchReg = GetScratchRegister(Is64Bit, MF); 1342 assert(!MF.getRegInfo().isLiveIn(ScratchReg) && 1343 "Scratch register is live-in"); 1344 1345 if (MF.getFunction()->isVarArg()) 1346 report_fatal_error("Segmented stacks do not support vararg functions."); 1347 if (!ST->isTargetLinux()) 1348 report_fatal_error("Segmented stacks supported only on linux."); 1349 1350 MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock(); 1351 MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock(); 1352 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 1353 bool IsNested = false; 1354 1355 // We need to know if the function has a nest argument only in 64 bit mode. 1356 if (Is64Bit) 1357 IsNested = HasNestArgument(&MF); 1358 1359 // The MOV R10, RAX needs to be in a different block, since the RET we emit in 1360 // allocMBB needs to be last (terminating) instruction. 
1361 1362 for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(), 1363 e = prologueMBB.livein_end(); i != e; i++) { 1364 allocMBB->addLiveIn(*i); 1365 checkMBB->addLiveIn(*i); 1366 } 1367 1368 if (IsNested) 1369 allocMBB->addLiveIn(X86::R10); 1370 1371 MF.push_front(allocMBB); 1372 MF.push_front(checkMBB); 1373 1374 // Eventually StackSize will be calculated by a link-time pass; which will 1375 // also decide whether checking code needs to be injected into this particular 1376 // prologue. 1377 StackSize = MFI->getStackSize(); 1378 1379 // Read the limit off the current stacklet off the stack_guard location. 1380 if (Is64Bit) { 1381 TlsReg = X86::FS; 1382 TlsOffset = 0x70; 1383 1384 if (StackSize < kSplitStackAvailable) 1385 ScratchReg = X86::RSP; 1386 else 1387 BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP) 1388 .addImm(0).addReg(0).addImm(-StackSize).addReg(0); 1389 1390 BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg) 1391 .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg); 1392 } else { 1393 TlsReg = X86::GS; 1394 TlsOffset = 0x30; 1395 1396 if (StackSize < kSplitStackAvailable) 1397 ScratchReg = X86::ESP; 1398 else 1399 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP) 1400 .addImm(0).addReg(0).addImm(-StackSize).addReg(0); 1401 1402 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg) 1403 .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg); 1404 } 1405 1406 // This jump is taken if SP >= (Stacklet Limit + Stack Space required). 1407 // It jumps to normal execution of the function body. 1408 BuildMI(checkMBB, DL, TII.get(X86::JG_4)).addMBB(&prologueMBB); 1409 1410 // On 32 bit we first push the arguments size and then the frame size. On 64 1411 // bit, we pass the stack frame size in r10 and the argument size in r11. 1412 if (Is64Bit) { 1413 // Functions with nested arguments use R10, so it needs to be saved across 1414 // the call to _morestack 1415 1416 if (IsNested) 1417 BuildMI(allocMBB, DL, TII.get(X86::MOV64rr), X86::RAX).addReg(X86::R10); 1418 1419 BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R10) 1420 .addImm(StackSize); 1421 BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R11) 1422 .addImm(X86FI->getArgumentStackSize()); 1423 MF.getRegInfo().setPhysRegUsed(X86::R10); 1424 MF.getRegInfo().setPhysRegUsed(X86::R11); 1425 } else { 1426 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32)) 1427 .addImm(X86FI->getArgumentStackSize()); 1428 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32)) 1429 .addImm(StackSize); 1430 } 1431 1432 // __morestack is in libgcc 1433 if (Is64Bit) 1434 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32)) 1435 .addExternalSymbol("__morestack"); 1436 else 1437 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32)) 1438 .addExternalSymbol("__morestack"); 1439 1440 if (IsNested) 1441 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10)); 1442 else 1443 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET)); 1444 1445 allocMBB->addSuccessor(&prologueMBB); 1446 1447 checkMBB->addSuccessor(allocMBB); 1448 checkMBB->addSuccessor(&prologueMBB); 1449 1450#ifdef XDEBUG 1451 MF.verify(); 1452#endif 1453} 1454