X86RegisterInfo.cpp revision a425e0073df51ab99665062a8c00d704e89d2ef8
//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Compiler.h"
using namespace llvm;

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(X86::ADJCALLSTACKDOWN, X86::ADJCALLSTACKUP),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();
  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

// getDwarfRegNum - This function maps LLVM register identifiers to the
// DWARF-specific numbering used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;
  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Not supported yet; fall back to the generic flavour for now.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}
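
// Note: the DWARF number for a given LLVM register depends on the flavour
// selected above. As an illustrative (not authoritative) example, 32-bit
// Darwin historically numbers a few registers differently in its EH tables
// than the generic 32-bit DWARF numbering does, so
//   getDwarfRegNum(X86::EBP, /*isEH=*/true)
// need not equal
//   getDwarfRegNum(X86::EBP, /*isEH=*/false)
// on that target.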

// getX86RegNum - This function maps LLVM register identifiers to their X86
// specific numbering, which is used in various places encoding instructions.
//
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15: case X86::MM7:
    return 7;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    assert(0 && "Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return NULL;
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX, X86::RBP, X86::RDI, X86::RSI,
    X86::R12, X86::R13, X86::R14, X86::R15, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return CalleeSavedRegs64Bit;
  } else {
    if (MF) {
      const MachineFrameInfo *MFI = MF->getFrameInfo();
      const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
      if (MMI && MMI->callsEHReturn())
        return CalleeSavedRegs32EHRet;
    }
    return CalleeSavedRegs32Bit;
  }
}
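
// The register lists returned above are null-terminated, so callers can walk
// them without knowing their length. A minimal sketch of the idiom (the loop
// body is hypothetical, for illustration only):
//
//   for (const unsigned *CSR = getCalleeSavedRegs(&MF); *CSR; ++CSR)
//     spillRegister(*CSR);  // spillRegister is not a real API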

const TargetRegisterClass* const*
X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
  static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegClassesWin64;
    else
      return CalleeSavedRegClasses64Bit;
  } else {
    if (MF) {
      const MachineFrameInfo *MFI = MF->getFrameInfo();
      const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
      if (MMI && MMI->callsEHReturn())
        return CalleeSavedRegClasses32EHRet;
    }
    return CalleeSavedRegClasses32Bit;
  }
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }
  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

static unsigned calculateMaxStackAlignment(const MachineFrameInfo *FFI) {
  unsigned MaxAlign = 0;
  for (int i = FFI->getObjectIndexBegin(),
         e = FFI->getObjectIndexEnd(); i != e; ++i) {
    if (FFI->isDeadObjectIndex(i))
      continue;
    unsigned Align = FFI->getObjectAlignment(i);
    MaxAlign = std::max(MaxAlign, Align);
  }

  return MaxAlign;
}
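
// Worked example: for live stack objects with alignments {4, 16, 8} plus one
// dead object with alignment 32, calculateMaxStackAlignment returns 16; dead
// object indices are skipped entirely.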

// hasFP - Return true if the specified function should have a dedicated frame
// pointer register. This is true if the function has variable sized allocas
// or if frame pointer elimination is disabled.
//
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();

  return (NoFramePointerElim ||
          needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          (MMI && MMI->callsUnwindInit()));
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // FIXME: Currently we don't support stack realignment for functions with
  // variable-sized allocas.
  return (RealignStack &&
          (MFI->getMaxAlignment() > StackAlign &&
           !MFI->hasVarSizedObjects()));
}

bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

int
X86RegisterInfo::getFrameIndexOffset(MachineFunction &MF, int FI) const {
  int Offset = MF.getFrameInfo()->getObjectOffset(FI) + SlotSize;
  uint64_t StackSize = MF.getFrameInfo()->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0)
      // Skip the saved EBP.
      Offset += SlotSize;
    else {
      unsigned Align = MF.getFrameInfo()->getObjectAlignment(FI);
      assert((-(Offset + StackSize)) % Align == 0);
      return Offset + StackSize;
    }

    // FIXME: Support tail calls.
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area.
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0) Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}
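
// A worked trace of the arithmetic above (assumed values, x86-32): with a
// frame pointer, no stack realignment and no tail-call delta, an object at
// getObjectOffset(FI) == -12 yields -12 + 4 + 4 == -4, i.e. the object lives
// four bytes below the frame pointer.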

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add.
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;

      MachineInstr *New = 0;
      if (Old->getOpcode() == X86::ADJCALLSTACKDOWN) {
        New = BuildMI(MF, TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
                      StackPtr).addReg(StackPtr).addImm(Amount);
      } else {
        assert(Old->getOpcode() == X86::ADJCALLSTACKUP);
        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;
        if (Amount) {
          unsigned Opc = (Amount < 128) ?
            (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
            (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
          New = BuildMI(MF, TII.get(Opc), StackPtr)
            .addReg(StackPtr).addImm(Amount);
        }
      }

      // Replace the pseudo instruction with a new instruction...
      if (New) MBB.insert(I, New);
    }
  } else if (I->getOpcode() == X86::ADJCALLSTACKUP) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = (CalleeAmt < 128) ?
        (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
        (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
      MachineInstr *New =
        BuildMI(MF, TII.get(Opc), StackPtr).addReg(StackPtr).addImm(CalleeAmt);
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}

void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                          int SPAdj, RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  while (!MI.getOperand(i).isFrameIndex()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();

  unsigned BasePtr;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else
    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from the base register.
  int64_t Offset = getFrameIndexOffset(MF, FrameIndex) +
    MI.getOperand(i+3).getImm();

  MI.getOperand(i+3).ChangeToImmediate(Offset);
}

void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *FFI = MF.getFrameInfo();

  // Calculate and set max stack object alignment early, so we can decide
  // whether we will need stack realignment (and thus FP).
  unsigned MaxAlign = std::max(FFI->getMaxAlignment(),
                               calculateMaxStackAlignment(FFI));

  FFI->setMaxAlignment(MaxAlign);
}

void
X86RegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF) const{
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0) {
    // Create the RETURNADDR area:
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MF.getFrameInfo()->
      CreateFixedObject(-TailCallReturnAddrDelta,
                        (-1*SlotSize)+TailCallReturnAddrDelta);
  }
  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize,
                                                        (int)SlotSize * -2 +
                                                        TailCallReturnAddrDelta);
    assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
  }
}
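
// A worked trace (assumed values): with SlotSize == 4 and
// TailCallReturnAddrDelta == -8, the code above first creates an 8-byte fixed
// object at offset (-1*4) + (-8) == -12 for the RETADDR area, then places the
// saved-EBP slot at 4 * -2 + (-8) == -16, which keeps it at the frame index
// the assertion expects.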

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub
    ? ((Offset < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
    : ((Offset < 128) ?
       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
  uint64_t Chunk = (1LL << 31) - 1;

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    BuildMI(MBB, MBBI, TII.get(Opc), StackPtr).addReg(StackPtr).addImm(ThisVal);
    Offset -= ThisVal;
  }
}

// mergeSPUpdatesUp - Merge a stack adjustment immediately before MBBI into
// the update described by NumBytes, deleting the merged instruction.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

// mergeSPUpdatesDown - Merge a stack adjustment immediately after MBBI into
// the update described by NumBytes, deleting the merged instruction.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // This merge is currently disabled; the code below is kept for when it is
  // turned back on.
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}
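
// A worked example of the merging (assumed instruction stream): if the
// epilogue is about to emit "add esp, 24" and the instruction just before the
// insertion point is "add esp, 8", mergeSPUpdatesUp folds the 8 into NumBytes
// (making it 32) and erases the old add, so a single "add esp, 32" is emitted.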

/// mergeSPUpdates - Check the instruction before/after the passed
/// instruction. If it is an ADD or SUB of the stack pointer, it is deleted,
/// and the stack adjustment is returned as a positive value for ADD and a
/// negative one for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {

  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  int Offset = 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : next(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}

void X86RegisterInfo::emitFrameMoves(MachineFunction &MF,
                                     unsigned FrameLabelId,
                                     unsigned ReadyLabelId) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  if (!MMI)
    return;

  uint64_t StackSize = MFI->getStackSize();
  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();

  // Calculate the number of bytes used for storing the return address.
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  if (StackSize) {
    // Show update of SP.
    if (hasFP(MF)) {
      // Adjust SP.
      MachineLocation SPDst(MachineLocation::VirtualFP);
      MachineLocation SPSrc(MachineLocation::VirtualFP, 2*stackGrowth);
      Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
    } else {
      MachineLocation SPDst(MachineLocation::VirtualFP);
      MachineLocation SPSrc(MachineLocation::VirtualFP,
                            -StackSize+stackGrowth);
      Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
    }
  } else {
    // FIXME: Verify & implement for FP.
    MachineLocation SPDst(StackPtr);
    MachineLocation SPSrc(StackPtr, stackGrowth);
    Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
  }

  // Add callee saved registers to the move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();

  // FIXME: This is a dirty hack and the code itself is a mess right now.
  // It should be rewritten from scratch and generalized at some point.

  // Determine the maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (unsigned I = 0, E = CSI.size(); I!=E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(CSI[I].getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (hasFP(MF) ? 3 : 2)*stackGrowth;
  for (unsigned I = 0, E = CSI.size(); I!=E; ++I) {
    int64_t Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
    unsigned Reg = CSI[I].getReg();
    Offset = (MaxOffset-Offset+saveAreaOffset);
    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(FrameLabelId, CSDst, CSSrc));
  }

  if (hasFP(MF)) {
    // Save FP.
    MachineLocation FPDst(MachineLocation::VirtualFP, 2*stackGrowth);
    MachineLocation FPSrc(FramePtr);
    Moves.push_back(MachineMove(ReadyLabelId, FPDst, FPSrc));
  }

  MachineLocation FPDst(hasFP(MF) ? FramePtr : StackPtr);
  MachineLocation FPSrc(MachineLocation::VirtualFP);
  Moves.push_back(MachineMove(ReadyLabelId, FPDst, FPSrc));
}
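
// The moves above are expressed against MachineLocation::VirtualFP, the
// virtual frame pointer from which the unwinder computes locations. As an
// assumed x86-32 example, stackGrowth is -4, so the frame-pointer case
// records the SP at offset 2 * -4 == -8 from the virtual FP: one slot for the
// return address plus one for the saved EBP.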

void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();  // Prolog goes in entry BB.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function* Fn = MF.getFunction();
  const X86Subtarget* Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
                          !Fn->doesNotThrow() ||
                          UnwindTablesMandatory;
  // Prepare for frame info.
  unsigned FrameLabelId = 0;

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  // Get the desired stack alignment.
  uint64_t MaxAlign = MFI->getMaxAlignment();

  // Add the RETADDR move area to the callee-saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() + (-TailCallReturnAddrDelta));

  // Insert a stack pointer adjustment for the later moving of the return
  // addr. This only applies to tail call optimized functions where the
  // callee's argument stack size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    BuildMI(MBB, MBBI, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
            StackPtr).addReg(StackPtr).addImm(-TailCallReturnAddrDelta);
  }

  uint64_t NumBytes = 0;
  if (hasFP(MF)) {
    // Calculate the required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register... which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP into the appropriate stack slot...
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr);

    if (needsFrameMoves) {
      // Mark the effective beginning of when the frame pointer becomes valid.
      FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);

    // Realign the stack.
    if (needsStackRealignment(MF))
      BuildMI(MBB, MBBI,
              TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
              StackPtr).addReg(StackPtr).addImm(-MaxAlign);
  } else
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();

  unsigned ReadyLabelId = 0;
  if (needsFrameMoves) {
    // Mark the effective beginning of when the frame pointer is ready.
    ReadyLabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, TII.get(X86::DBG_LABEL)).addImm(ReadyLabelId);
  }
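
  // At this point the emitted prologue of a frame-pointer function that also
  // realigns its stack looks roughly like this (x86-32, illustrative only):
  //
  //   push ebp
  //   mov  ebp, esp
  //   and  esp, -MaxAlign
  //
  // followed by the callee-saved register pushes, which the loop below skips
  // over before the local stack area is allocated.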

  // Skip over the callee-saved push instructions.
  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r))
    ++MBBI;

  if (NumBytes) {  // Adjust stack pointer: ESP -= numbytes.
    if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
      // Check whether EAX is livein for this function.
      bool isEAXAlive = false;
      for (MachineRegisterInfo::livein_iterator
             II = MF.getRegInfo().livein_begin(),
             EE = MF.getRegInfo().livein_end();
           (II != EE) && !isEAXAlive; ++II) {
        unsigned Reg = II->first;
        isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                      Reg == X86::AH || Reg == X86::AL);
      }

      // The function prologue calls _alloca to probe the stack when
      // allocating more than 4k bytes in one go. Touching the stack at 4K
      // increments is necessary to ensure that the guard pages used by the OS
      // virtual memory manager are allocated in the correct sequence.
      if (!isEAXAlive) {
        BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
        BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
          .addExternalSymbol("_alloca");
      } else {
        // Save EAX.
        BuildMI(MBB, MBBI, TII.get(X86::PUSH32r), X86::EAX);
        // Allocate NumBytes-4 bytes on the stack. We'll also use the 4
        // already allocated bytes for EAX.
        BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4);
        BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
          .addExternalSymbol("_alloca");
        // Restore EAX.
        MachineInstr *MI = addRegOffset(BuildMI(MF, TII.get(X86::MOV32rm),
                                                X86::EAX),
                                        StackPtr, false, NumBytes-4);
        MBB.insert(MBBI, MI);
      }
    } else {
      // If there is a SUB32ri of ESP immediately before this instruction,
      // merge the two. This can be the case when tail call elimination is
      // enabled and the callee has more arguments than the caller.
      NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);
      // If there is an ADD32ri or SUB32ri of ESP immediately after this
      // instruction, merge the two instructions.
      mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

      if (NumBytes)
        emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
    }
  }

  if (needsFrameMoves)
    emitFrameMoves(MF, FrameLabelId, ReadyLabelId);
}
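
// A worked example for the CygMing path above (assumed values): with
// NumBytes == 8192 and EAX dead, the prologue loads EAX with 8192 and calls
// _alloca, which touches the region page by page so the OS guard pages are
// faulted in sequentially; a single "sub esp, 8192" could jump past a guard
// page without ever touching it.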

void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();

  switch (RetOpcode) {
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNri64:
  case X86::TCRETURNdi64:
  case X86::EH_RETURN:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm: break;  // These are ok.
  default:
    assert(0 && "Can only insert epilog into returning blocks");
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (hasFP(MF)) {
    // Calculate the required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else
    NumBytes = StackSize - CSSize;

  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();
    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;
    --MBBI;
  }

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset ESP to point to the last
  // callee-saved slot before popping them off! The same applies when the
  // stack was realigned.
  if (needsStackRealignment(MF)) {
    // We cannot use LEA here because the stack pointer was realigned; we need
    // to deallocate the local frame explicitly.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI = addRegOffset(BuildMI(MF, TII.get(Opc), StackPtr),
                                      FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else
      BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
              StackPtr).addReg(FramePtr);

  } else {
    // Adjust stack pointer back: ESP += numbytes.
    if (NumBytes)
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
  }
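
  // What remains is handling the special return forms. A worked example for
  // the tail-call case below (assumed values): StackAdj == 12 and
  // MaxTCDelta == -8 give Offset = 12 - (-8) == 20, so 20 bytes are added to
  // the stack pointer before the tail jump is emitted.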

  // We're returning from the function via eh_return.
  if (RetOpcode == X86::EH_RETURN) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isRegister() && "Offset should be in register!");
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  // Tail call return: adjust the stack pointer and jump to the callee.
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64) {
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(1);
    assert(StackAdjust.isImmediate() && "Expecting immediate value.");

    // Adjust the stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
    // Incorporate the retaddr area.
    Offset = StackAdj-MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");
    if (Offset) {
      // Check for a possible merge with the preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }
    // Jump to the label or to the value in the register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64)
      BuildMI(MBB, MBBI, TII.get(X86::TAILJMPd)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
    else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
    } else
      BuildMI(MBB, MBBI, TII.get(X86::TAILJMPr), JumpTarget.getReg());
    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());
    // Check for a possible merge with the preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  if (Is64Bit)
    return X86::RIP;  // Should have dwarf #16.
  else
    return X86::EIP;  // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

void X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves)
  const {
  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = (Is64Bit ? -8 : -4);

  // The initial state of the frame pointer is esp+4 (rsp+8 on x86-64).
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add the return address to the move list.
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  assert(0 && "What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  assert(0 && "What is the exception handler register");
  return 0;
}
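
// A few illustrative mappings for getX86SubSuperRegister, defined below; each
// follows directly from its switch cases:
//   getX86SubSuperRegister(X86::EAX, MVT::i8, /*High=*/true) == X86::AH
//   getX86SubSuperRegister(X86::AL,  MVT::i64)               == X86::RAX
//   getX86SubSuperRegister(X86::R8,  MVT::i16)               == X86::R8W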

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT VT, bool High) {
  switch (VT.getSimpleVT()) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"

namespace {
  struct VISIBILITY_HIDDEN MSAC : public MachineFunctionPass {
    static char ID;
    MSAC() : MachineFunctionPass(&ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      MachineFrameInfo *FFI = MF.getFrameInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();

      // Calculate the max stack alignment of all already allocated stack
      // objects.
      unsigned MaxAlign = calculateMaxStackAlignment(FFI);

      // Be over-conservative: scan over all vreg defs and check whether
      // vector registers are used. If so, there is a chance that a vector
      // register will be spilled and thus the stack needs to be aligned
      // properly.
      for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
           RegNum < RI.getLastVirtReg(); ++RegNum)
        MaxAlign = std::max(MaxAlign, RI.getRegClass(RegNum)->getAlignment());

      FFI->setMaxAlignment(MaxAlign);

      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Calculator";
    }
  };

  char MSAC::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentCalculatorPass() { return new MSAC(); }
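
// A minimal usage sketch (hypothetical driver code, not part of this file):
// the target would add this pass to its pass manager before register
// allocation, e.g.
//
//   PM.add(createX86MaxStackAlignmentCalculatorPass());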