X86RegisterInfo.cpp revision 52cd548525089056ac5be97e2b8eb05257bcdf3b
//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Unsupported for now; fall back to the generic numbering.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}
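// Note: only 32-bit Darwin needs a distinct flavour for exception handling;
// the other 32-bit targets above collapse to the generic numbering, and
// 64-bit targets use the single X86_64 flavour for both EH and debug info.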
/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15: case X86::MM7:
    return 7;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}
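// Example (illustrative): getX86RegNum(X86::R9D) == N86::ECX == 1. The
// extended registers R8-R15 reuse the three low encoding bits of EAX-EDI;
// the fourth bit is emitted separately in the REX prefix.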
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case 1:
    // 8-bit
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case 2:
    // 8-bit hi
    if (B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case 3:
    // 16-bit
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    }
    break;
  case 4:
    // 32-bit
    if (B == &X86::GR32RegClass || B == &X86::GR32_NOSPRegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  }
  return 0;
}
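// Example (illustrative): a SubIdx-1 (low byte) query with A == GR32 and
// B == GR8_ABCD_L returns GR32_ABCD above, since only EAX/EBX/ECX/EDX have
// an addressable low-byte sub-register outside 64-bit mode.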
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return NULL;
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;

  if (MF) {
    const MachineFrameInfo *MFI = MF->getFrameInfo();
    const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
    callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
  }

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX,   X86::RBP,   X86::RDI,   X86::RSI,
    X86::R12,   X86::R13,   X86::R14,   X86::R15,
    X86::XMM6,  X86::XMM7,  X86::XMM8,  X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}
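// Note: the register-class arrays in getCalleeSavedRegClasses below parallel
// the register arrays above entry for entry (e.g. the Win64 list pairs eight
// GR64 entries with ten VR128 entries for XMM6-XMM15); the two functions
// must stay in sync.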
const TargetRegisterClass* const*
X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
  bool callsEHReturn = false;

  if (MF) {
    const MachineFrameInfo *MFI = MF->getFrameInfo();
    const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
    callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
  }

  static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64EHRet[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegClassesWin64;
    else
      return (callsEHReturn ?
              CalleeSavedRegClasses64EHRet : CalleeSavedRegClasses64Bit);
  } else {
    return (callsEHReturn ?
            CalleeSavedRegClasses32EHRet : CalleeSavedRegClasses32Bit);
  }
}
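// Note for getReservedRegs below: each register is reserved together with
// all of its aliases (e.g. RSP/ESP/SP/SPL), so no allocatable register can
// overlap a reserved one at any width.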
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);
  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

static unsigned calculateMaxStackAlignment(const MachineFrameInfo *FFI) {
  unsigned MaxAlign = 0;

  for (int i = FFI->getObjectIndexBegin(),
         e = FFI->getObjectIndexEnd(); i != e; ++i) {
    if (FFI->isDeadObjectIndex(i))
      continue;

    unsigned Align = FFI->getObjectAlignment(i);
    MaxAlign = std::max(MaxAlign, Align);
  }

  return MaxAlign;
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();

  return (NoFramePointerElim ||
          needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          (MMI && MMI->callsUnwindInit()));
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  bool requiresRealignment =
    RealignStack && (MFI->getMaxAlignment() > StackAlign);

  // FIXME: Currently we don't support stack realignment for functions with
  // variable-sized allocas.
  if (requiresRealignment && MFI->hasVarSizedObjects())
    llvm_report_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  return requiresRealignment;
}

bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

bool X86RegisterInfo::hasReservedSpillSlot(MachineFunction &MF, unsigned Reg,
                                           int &FrameIdx) const {
  if (Reg == FramePtr && hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}
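// Sketch of the frame this file builds (stack grows down, frame-pointer
// case; matches the diagram in processFunctionBeforeCalleeSavedScan):
//
//   incoming arguments
//   return address
//   [RETADDR move area, tail-call-optimized functions only]
//   saved EBP/RBP
//   other callee-saved registers
//   locals and spill slots
//
// getFrameIndexOffset below turns a frame index into an offset from the
// base register later chosen by eliminateFrameIndex.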
int
X86RegisterInfo::getFrameIndexOffset(MachineFunction &MF, int FI) const {
  const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - TFI.getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      Offset += SlotSize;
    } else {
      unsigned Align = MFI->getObjectAlignment(FI);
      assert((-(Offset + StackSize)) % Align == 0);
      Align = 0;
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area.
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into an 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add.
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

      MachineInstr *New = 0;
      if (Old->getOpcode() == getCallFrameSetupOpcode()) {
        New = BuildMI(MF, Old->getDebugLoc(),
                      TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
                      StackPtr)
          .addReg(StackPtr)
          .addImm(Amount);
      } else {
        assert(Old->getOpcode() == getCallFrameDestroyOpcode());

        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;

        if (Amount) {
          unsigned Opc = (Amount < 128) ?
            (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
            (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
          New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
            .addReg(StackPtr)
            .addImm(Amount);
        }
      }

      if (New) {
        // The EFLAGS implicit def is dead.
        New->getOperand(3).setIsDead();

        // Replace the pseudo instruction with a new instruction.
        MBB.insert(I, New);
      }
    }
  } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = (CalleeAmt < 128) ?
        (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
        (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
      MachineInstr *Old = I;
      MachineInstr *New =
        BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
                StackPtr)
          .addReg(StackPtr)
          .addImm(CalleeAmt);

      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}
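// Worked example of the round-up above: with StackAlign == 16 and an
// outgoing-argument area of Amount == 20 bytes,
// (20 + 16 - 1) / 16 * 16 == 35 / 16 * 16 == 2 * 16 == 32, so the call
// frame is padded to 32 bytes to preserve stack alignment.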
unsigned
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, int *Value,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else
    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register (EBP/ESP or their 64-bit
  // counterparts), then fold the frame object's offset into the
  // displacement operand.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = getFrameIndexOffset(MF, FrameIndex) +
      (int)(MI.getOperand(i + 3).getImm());

    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = getFrameIndexOffset(MF, FrameIndex) +
      (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
  return 0;
}

void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Calculate and set the max stack object alignment early, so we can decide
  // whether we will need stack realignment (and thus FP).
  unsigned MaxAlign = std::max(MFI->getMaxAlignment(),
                               calculateMaxStackAlignment(MFI));

  MFI->setMaxAlignment(MaxAlign);

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // Create the RETURNADDR area:
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           (-1U*SlotSize)+TailCallReturnAddrDelta,
                           true, false);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true, false);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    FrameIdx = 0;
  }
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub
    ? ((Offset < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
    : ((Offset < 128) ?
       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = (MBBI != MBB.end() ? MBBI->getDebugLoc() :
                 DebugLoc::getUnknownLoc());

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    Offset -= ThisVal;
  }
}
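// Example: emitSPUpdate(MBB, MBBI, StackPtr, -8, false, TII) emits the short
// form "sub esp, 8" (SUB32ri8), while -136 needs the 32-bit immediate form
// "sub esp, 136" (SUB32ri); adjustments larger than 2^31-1 bytes are split
// into chunks by the loop above.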
/// mergeSPUpdatesUp - Merge a stack-pointer adjustment found immediately
/// above the given iterator into *NumBytes and erase it.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

/// mergeSPUpdatesDown - Merge a stack-pointer adjustment found immediately
/// below the given iterator into *NumBytes and erase it.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // FIXME: THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Check the instruction before/after the passed
/// instruction. If it is an ADD/SUB of the stack pointer, it is deleted and
/// the stack adjustment is returned as a positive value for ADD and a
/// negative one for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr){
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
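// Example: with "sub esp, 16" immediately before MBBI,
// mergeSPUpdates(MBB, MBBI, StackPtr, /*doMergeWithPrevious=*/true) erases
// the SUB and returns -16, which the prologue below folds into its own
// stack adjustment via "NumBytes -= mergeSPUpdates(...)".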
void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                unsigned LabelId,
                                                unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  if (!MMI) return;

  // Add the callee-saved registers to the move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  bool HasFP = hasFP(MF);

  // Calculate the number of bytes used for return address storage.
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  // FIXME: This is a dirty hack. The code itself is quite messy right now.
  // It should be rewritten from scratch and generalized sometime.

  // Determine the maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate the offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    // another bug. I.e., one where we generate a prolog like this:
    //
    //     pushl  %ebp
    //     movl   %esp, %ebp
    //     pushl  %ebp
    //     pushl  %esi
    //     ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(LabelId, CSDst, CSSrc));
  }
}
/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit the labels used by the
/// exception handler to generate the exception handling frames.
void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86Subtarget *Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
                          !Fn->doesNotThrow() || UnwindTablesMandatory;
  uint64_t MaxAlign  = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  DebugLoc DL;

  // Add the RETADDR move area to the callee-saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64, the Red Zone is not disabled, and we are a leaf
  // function using at most 128 bytes of stack space with no frame pointer,
  // calls, or dynamic allocas, then we do not need to adjust the stack
  // pointer (we fit in the Red Zone).
  if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
      !needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
      !MFI->hasCalls() &&                          // No calls.
      !Subtarget->isTargetWin64()) {               // Win64 has no Red Zone.
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  } else if (Subtarget->isTargetWin64()) {
    // We always need to allocate 32 bytes as the register spill area.
    // FIXME: We might reuse these 32 bytes for leaf functions.
    StackSize += 32;
    MFI->setStackSize(StackSize);
  }

  // Insert a stack pointer adjustment for later moving of the return addr.
  // This only applies to tail call optimized functions where the callee
  // argument stack size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  uint64_t NumBytes = 0;
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  if (HasFP) {
    // Calculate the required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      unsigned FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);

      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      }

      // Change the rule for the FramePtr to be an "offset" rule.
      MachineLocation FPDst(MachineLocation::VirtualFP,
                            2 * stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
        .addReg(StackPtr);

    if (needsFrameMoves) {
      // Mark the effective beginning of when the frame pointer becomes valid.
      unsigned FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);

      // Define the current CFA to use the EBP/RBP register.
      MachineLocation FPDst(FramePtr);
      MachineLocation FPSrc(MachineLocation::VirtualFP);
      Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);

    // Realign the stack.
    if (needsStackRealignment(MF)) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL,
                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
                StackPtr).addReg(StackPtr).addImm(-MaxAlign);

      // The EFLAGS implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark the callee-saved push instruction.
      unsigned LabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);

      // Define the current CFA rule to use the provided offset.
      unsigned Ptr = StackSize ?
        MachineLocation::VirtualFP : StackPtr;
      MachineLocation SPDst(Ptr);
      MachineLocation SPSrc(Ptr, StackOffset);
      Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
      StackOffset += stackGrowth;
    }
  }

  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  // Adjust the stack pointer: ESP -= NumBytes.
  if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
    // Check whether EAX is live-in for this function.
    bool isEAXAlive = false;
    for (MachineRegisterInfo::livein_iterator
           II = MF.getRegInfo().livein_begin(),
           EE = MF.getRegInfo().livein_end(); (II != EE) && !isEAXAlive; ++II) {
      unsigned Reg = II->first;
      isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                    Reg == X86::AH || Reg == X86::AL);
    }

    // The function prologue calls _alloca to probe the stack when allocating
    // more than 4K bytes in one go. Touching the stack at 4K increments is
    // necessary to ensure that the guard pages used by the OS virtual memory
    // manager are allocated in the correct sequence.
    if (!isEAXAlive) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca");
    } else {
      // Save EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill);

      // Allocate NumBytes-4 bytes on the stack. We'll also use the 4 bytes
      // already allocated for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes - 4);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca");

      // Restore EAX.
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    // If there is a SUB32ri of ESP immediately before this instruction, merge
    // the two. This can be the case when tail call elimination is enabled and
    // the callee has more arguments than the caller.
    NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

    // If there is an ADD32ri or SUB32ri of ESP immediately after this
    // instruction, merge the two instructions.
    mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

    if (NumBytes)
      emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
  }

  if ((NumBytes || PushedRegs) && needsFrameMoves) {
    // Mark the end of the stack pointer adjustment.
    unsigned LabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);

    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -StackSize + stackGrowth);
        Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
      }
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MF, LabelId, HasFP ? FramePtr : StackPtr);
  }
}
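// For reference, the common 32-bit frame-pointer prologue assembled by the
// code above has the shape:
//
//   pushl %ebp
//   movl  %esp, %ebp
//   pushl <callee-saved register>   ; the callee-saved pushes themselves are
//   ...                             ; emitted elsewhere; emitPrologue only
//   subl  $NumBytes, %esp           ; walks past them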
void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNri64:
  case X86::TCRETURNdi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm:
    break;  // These are ok.
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (hasFP(MF)) {
    // Calculate the required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;

    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset ESP to point to the last
  // callee-saved slot before popping them off! The same applies when the
  // stack was realigned.
  if (needsStackRealignment(MF)) {
    // We cannot use LEA here, because the stack pointer was realigned; we
    // need to deallocate the local frame first.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI =
        addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                        FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else {
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust the stack pointer back: ESP += NumBytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
  }

  // We're returning from the function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64) {
    // Tail call return: adjust the stack pointer and jump to the callee.
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust the stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj-MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for a possible merge with the preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }

    // Jump to the label or to the value in a register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64)
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPd)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
    else if (RetOpcode == X86::TCRETURNri64)
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
    else
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());

    // Check for a possible merge with the preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP     // Should have dwarf #16.
                 : X86::EIP;    // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

void
X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) const {
  // Calculate the number of bytes used for return address storage.
  int stackGrowth = (Is64Bit ? -8 : -4);

  // Initial state of the frame pointer is esp+4 (rsp+8 on x86-64).
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add the return address to the move list.
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}
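// Examples (illustrative) for getX86SubSuperRegister below:
//   getX86SubSuperRegister(X86::EAX, MVT::i8, true)   == X86::AH
//   getX86SubSuperRegister(X86::SPL, MVT::i64, false) == X86::RSP
// Registers without the requested sub-register (e.g. ESI with MVT::i8 and
// High == true) map to 0.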
namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"

namespace {
  struct MSAC : public MachineFunctionPass {
    static char ID;
    MSAC() : MachineFunctionPass(&ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      MachineFrameInfo *FFI = MF.getFrameInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();

      // Calculate the max stack alignment of all already allocated stack
      // objects.
      unsigned MaxAlign = calculateMaxStackAlignment(FFI);

      // Be over-conservative: scan all vreg definitions to see whether vector
      // registers are used. If so, there is a chance that a vector register
      // will be spilled, and the stack then needs to be aligned properly.
      for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
           RegNum < RI.getLastVirtReg(); ++RegNum)
        MaxAlign = std::max(MaxAlign, RI.getRegClass(RegNum)->getAlignment());

      if (FFI->getMaxAlignment() == MaxAlign)
        return false;

      FFI->setMaxAlignment(MaxAlign);
      return true;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Calculator";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAC::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentCalculatorPass() { return new MSAC(); }
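// Example of the pass's effect (illustrative): a function that defines a
// VR128 virtual register (a 16-byte aligned class) has its
// MFI->getMaxAlignment() raised to at least 16 here, which in turn makes
// needsStackRealignment() fire when the incoming stack alignment is smaller.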