X86RegisterInfo.cpp revision ee465749313579ccd91575ca8acf70b75c221a2c
//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the MRegisterInfo class. This
// file is responsible for the frame pointer elimination optimization on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;

namespace {
  cl::opt<bool>
  NoFusing("disable-spill-fusing",
           cl::desc("Disable fusing of spill code into instructions"));
  cl::opt<bool>
  PrintFailedFusing("print-failed-fuse-candidates",
                    cl::desc("Print instructions that the allocator wants to"
                             " fuse, but the X86 backend currently can't"),
                    cl::Hidden);
}

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(X86::ADJCALLSTACKDOWN, X86::ADJCALLSTACKUP),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

// getX86RegNum - This function maps LLVM register identifiers to their X86
// specific numbering, which is used in various places encoding instructions.
//
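// Note: the N86 numbers below are the classic ModRM register encodings. In
// that encoding the 8-bit high registers AH/CH/DH/BH share values 4-7 with
// ESP/EBP/ESI/EDI (and with SPL/BPL/SIL/DIL when a REX prefix is present),
// which is why they appear on those lines of the switch below.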
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0:  case X86::XMM1:  case X86::XMM2:  case X86::XMM3:
  case X86::XMM4:  case X86::XMM5:  case X86::XMM6:  case X86::XMM7:
    return getDwarfRegNum(RegNo) - getDwarfRegNum(X86::XMM0);
  case X86::XMM8:  case X86::XMM9:  case X86::XMM10: case X86::XMM11:
  case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
    return getDwarfRegNum(RegNo) - getDwarfRegNum(X86::XMM8);

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    assert(0 && "Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

bool X86RegisterInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                                MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  MachineFunction &MF = *MBB.getParent();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  X86FI->setCalleeSavedFrameSize(CSI.size() * SlotSize);
  unsigned Opc = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    BuildMI(MBB, MI, TII.get(Opc)).addReg(Reg);
  }
  return true;
}
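
// The pushes above walk CSI in reverse, so the forward-order pops emitted by
// restoreCalleeSavedRegisters below undo them in LIFO order.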

bool X86RegisterInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  unsigned Opc = Is64Bit ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    BuildMI(MBB, MI, TII.get(Opc), Reg);
  }
  return true;
}

void X86RegisterInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MI,
                                          unsigned SrcReg, int FrameIdx,
                                          const TargetRegisterClass *RC) const {
  unsigned Opc;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64mr;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32mr;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16mr;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8mr;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_mr;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_mr;
  } else if (RC == &X86::RFP64RegClass || RC == &X86::RSTRegClass) {
    Opc = X86::ST_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::ST_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSmr;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDmr;
  } else if (RC == &X86::VR128RegClass) {
    Opc = X86::MOVAPSmr;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64mr;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }
  addFrameReference(BuildMI(MBB, MI, TII.get(Opc)), FrameIdx)
    .addReg(SrcReg, false, false, true);
}

void X86RegisterInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MI,
                                           unsigned DestReg, int FrameIdx,
                                           const TargetRegisterClass *RC) const{
  unsigned Opc;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64rm;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32rm;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16rm;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8rm;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_rm;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_rm;
  } else if (RC == &X86::RFP64RegClass || RC == &X86::RSTRegClass) {
    Opc = X86::LD_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::LD_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSrm;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDrm;
  } else if (RC == &X86::VR128RegClass) {
    Opc = X86::MOVAPSrm;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rm;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }
  addFrameReference(BuildMI(MBB, MI, TII.get(Opc), DestReg), FrameIdx);
}
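
// A GR32 spill/reload pair thus lowers to plain 'mov' instructions against
// the stack slot; x87 values (RFP*/RST) go through the ST_Fp/LD_Fp memory
// forms, scalar/vector SSE values use movss/movsd/movaps, and MMX uses movq.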

void X86RegisterInfo::copyRegToReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   unsigned DestReg, unsigned SrcReg,
                                   const TargetRegisterClass *RC) const {
  unsigned Opc;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64rr;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32rr;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16rr;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8rr;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_rr;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_rr;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::MOV_Fp3232;
  } else if (RC == &X86::RFP64RegClass || RC == &X86::RSTRegClass) {
    Opc = X86::MOV_Fp6464;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::FsMOVAPSrr;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::FsMOVAPDrr;
  } else if (RC == &X86::VR128RegClass) {
    Opc = X86::MOVAPSrr;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rr;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }
  BuildMI(MBB, MI, TII.get(Opc), DestReg).addReg(SrcReg);
}


void X86RegisterInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    const MachineInstr *Orig) const {
  MachineInstr *MI = Orig->clone();
  MI->getOperand(0).setReg(DestReg);
  MBB.insert(I, MI);
}

static MachineInstr *FuseTwoAddrInst(unsigned Opcode, unsigned FrameIndex,
                                     MachineInstr *MI,
                                     const TargetInstrInfo &TII) {
  unsigned NumOps = TII.getNumOperands(MI->getOpcode())-2;
  // Create the base instruction with the memory operand as the first part.
  MachineInstrBuilder MIB = addFrameReference(BuildMI(TII.get(Opcode)),
                                              FrameIndex);

  // Loop over the rest of the ri operands, converting them over.
  for (unsigned i = 0; i != NumOps; ++i) {
    MachineOperand &MO = MI->getOperand(i+2);
    if (MO.isReg())
      MIB = MIB.addReg(MO.getReg(), false, MO.isImplicit());
    else if (MO.isImm())
      MIB = MIB.addImm(MO.getImm());
    else if (MO.isGlobalAddress())
      MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
    else if (MO.isJumpTableIndex())
      MIB = MIB.addJumpTableIndex(MO.getJumpTableIndex());
    else if (MO.isExternalSymbol())
      MIB = MIB.addExternalSymbol(MO.getSymbolName());
    else
      assert(0 && "Unknown operand type!");
  }
  return MIB;
}

static MachineInstr *FuseInst(unsigned Opcode, unsigned OpNo,
                              unsigned FrameIndex, MachineInstr *MI,
                              const TargetInstrInfo &TII) {
  MachineInstrBuilder MIB = BuildMI(TII.get(Opcode));

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (i == OpNo) {
      assert(MO.isReg() && "Expected to fold into reg operand!");
      MIB = addFrameReference(MIB, FrameIndex);
    } else if (MO.isReg())
      MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
    else if (MO.isImm())
      MIB = MIB.addImm(MO.getImm());
    else if (MO.isGlobalAddress())
      MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
    else if (MO.isJumpTableIndex())
      MIB = MIB.addJumpTableIndex(MO.getJumpTableIndex());
    else if (MO.isExternalSymbol())
      MIB = MIB.addExternalSymbol(MO.getSymbolName());
    else
      assert(0 && "Unknown operand for FuseInst!");
  }
  return MIB;
}

static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII,
                                unsigned Opcode, unsigned FrameIndex,
                                MachineInstr *MI) {
  return addFrameReference(BuildMI(TII.get(Opcode)), FrameIndex).addImm(0);
}


//===----------------------------------------------------------------------===//
// Efficient Lookup Table Support
//===----------------------------------------------------------------------===//

namespace {
  /// TableEntry - Maps the 'from' opcode to a fused form of the 'to' opcode.
  ///
  struct TableEntry {
    unsigned from;    // Original opcode.
    unsigned to;      // New opcode.

    // less operators used by STL search.
    bool operator<(const TableEntry &TE) const { return from < TE.from; }
    friend bool operator<(const TableEntry &TE, unsigned V) {
      return TE.from < V;
    }
    friend bool operator<(unsigned V, const TableEntry &TE) {
      return V < TE.from;
    }
  };
}
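
// The asymmetric operator< overloads above let std::lower_bound in
// TableLookup compare a TableEntry directly against a raw opcode value,
// without materializing a temporary TableEntry key.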

/// TableIsSorted - Return true if the table is in 'from' opcode order.
///
static bool TableIsSorted(const TableEntry *Table, unsigned NumEntries) {
  for (unsigned i = 1; i != NumEntries; ++i)
    if (!(Table[i-1] < Table[i])) {
      cerr << "Entries out of order " << Table[i-1].from
           << " " << Table[i].from << "\n";
      return false;
    }
  return true;
}

/// TableLookup - Return the table entry matching the specified opcode.
/// Otherwise return NULL.
static const TableEntry *TableLookup(const TableEntry *Table, unsigned N,
                                     unsigned Opcode) {
  const TableEntry *I = std::lower_bound(Table, Table+N, Opcode);
  if (I != Table+N && I->from == Opcode)
    return I;
  return NULL;
}

#define ARRAY_SIZE(TABLE)  \
   (sizeof(TABLE)/sizeof(TABLE[0]))

#ifdef NDEBUG
#define ASSERT_SORTED(TABLE)
#else
#define ASSERT_SORTED(TABLE)                                              \
  { static bool TABLE##Checked = false;                                   \
    if (!TABLE##Checked) {                                                \
       assert(TableIsSorted(TABLE, ARRAY_SIZE(TABLE)) &&                  \
              "All lookup tables must be sorted for efficient access!");  \
       TABLE##Checked = true;                                             \
    }                                                                     \
  }
#endif


MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
                                                 unsigned i,
                                                 int FrameIndex) const {
  // Check switch flag
  if (NoFusing) return NULL;

  // Table (and size) to search
  const TableEntry *OpcodeTablePtr = NULL;
  unsigned OpcodeTableSize = 0;
  bool isTwoAddrFold = false;
  unsigned NumOps = TII.getNumOperands(MI->getOpcode());
  bool isTwoAddr = NumOps > 1 &&
    MI->getInstrDescriptor()->getOperandConstraint(1, TOI::TIED_TO) != -1;

  MachineInstr *NewMI = NULL;
  // Folding a memory location into the two-address part of a two-address
  // instruction is different from folding it in other places. It requires
  // replacing the *two* registers with the memory location.
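  // For example, with the tied def/use spilled, an 'ADD32rr %r, %r, %r2' can
  // be rewritten as 'ADD32mr <fi>, %r2': the frame reference replaces both
  // tied register operands at once.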
  if (isTwoAddr && NumOps >= 2 && i < 2 &&
      MI->getOperand(0).isReg() &&
      MI->getOperand(1).isReg() &&
      MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
    static const TableEntry OpcodeTable[] = {
      { X86::ADC32ri, X86::ADC32mi },
      { X86::ADC32ri8, X86::ADC32mi8 },
      { X86::ADC32rr, X86::ADC32mr },
      { X86::ADC64ri32, X86::ADC64mi32 },
      { X86::ADC64ri8, X86::ADC64mi8 },
      { X86::ADC64rr, X86::ADC64mr },
      { X86::ADD16ri, X86::ADD16mi },
      { X86::ADD16ri8, X86::ADD16mi8 },
      { X86::ADD16rr, X86::ADD16mr },
      { X86::ADD32ri, X86::ADD32mi },
      { X86::ADD32ri8, X86::ADD32mi8 },
      { X86::ADD32rr, X86::ADD32mr },
      { X86::ADD64ri32, X86::ADD64mi32 },
      { X86::ADD64ri8, X86::ADD64mi8 },
      { X86::ADD64rr, X86::ADD64mr },
      { X86::ADD8ri, X86::ADD8mi },
      { X86::ADD8rr, X86::ADD8mr },
      { X86::AND16ri, X86::AND16mi },
      { X86::AND16ri8, X86::AND16mi8 },
      { X86::AND16rr, X86::AND16mr },
      { X86::AND32ri, X86::AND32mi },
      { X86::AND32ri8, X86::AND32mi8 },
      { X86::AND32rr, X86::AND32mr },
      { X86::AND64ri32, X86::AND64mi32 },
      { X86::AND64ri8, X86::AND64mi8 },
      { X86::AND64rr, X86::AND64mr },
      { X86::AND8ri, X86::AND8mi },
      { X86::AND8rr, X86::AND8mr },
      { X86::DEC16r, X86::DEC16m },
      { X86::DEC32r, X86::DEC32m },
      { X86::DEC64_16r, X86::DEC16m },
      { X86::DEC64_32r, X86::DEC32m },
      { X86::DEC64r, X86::DEC64m },
      { X86::DEC8r, X86::DEC8m },
      { X86::INC16r, X86::INC16m },
      { X86::INC32r, X86::INC32m },
      { X86::INC64_16r, X86::INC16m },
      { X86::INC64_32r, X86::INC32m },
      { X86::INC64r, X86::INC64m },
      { X86::INC8r, X86::INC8m },
      { X86::NEG16r, X86::NEG16m },
      { X86::NEG32r, X86::NEG32m },
      { X86::NEG64r, X86::NEG64m },
      { X86::NEG8r, X86::NEG8m },
      { X86::NOT16r, X86::NOT16m },
      { X86::NOT32r, X86::NOT32m },
      { X86::NOT64r, X86::NOT64m },
      { X86::NOT8r, X86::NOT8m },
      { X86::OR16ri, X86::OR16mi },
      { X86::OR16ri8, X86::OR16mi8 },
      { X86::OR16rr, X86::OR16mr },
      { X86::OR32ri, X86::OR32mi },
      { X86::OR32ri8, X86::OR32mi8 },
      { X86::OR32rr, X86::OR32mr },
      { X86::OR64ri32, X86::OR64mi32 },
      { X86::OR64ri8, X86::OR64mi8 },
      { X86::OR64rr, X86::OR64mr },
      { X86::OR8ri, X86::OR8mi },
      { X86::OR8rr, X86::OR8mr },
      { X86::ROL16r1, X86::ROL16m1 },
      { X86::ROL16rCL, X86::ROL16mCL },
      { X86::ROL16ri, X86::ROL16mi },
      { X86::ROL32r1, X86::ROL32m1 },
      { X86::ROL32rCL, X86::ROL32mCL },
      { X86::ROL32ri, X86::ROL32mi },
      { X86::ROL64r1, X86::ROL64m1 },
      { X86::ROL64rCL, X86::ROL64mCL },
      { X86::ROL64ri, X86::ROL64mi },
      { X86::ROL8r1, X86::ROL8m1 },
      { X86::ROL8rCL, X86::ROL8mCL },
      { X86::ROL8ri, X86::ROL8mi },
      { X86::ROR16r1, X86::ROR16m1 },
      { X86::ROR16rCL, X86::ROR16mCL },
      { X86::ROR16ri, X86::ROR16mi },
      { X86::ROR32r1, X86::ROR32m1 },
      { X86::ROR32rCL, X86::ROR32mCL },
      { X86::ROR32ri, X86::ROR32mi },
      { X86::ROR64r1, X86::ROR64m1 },
      { X86::ROR64rCL, X86::ROR64mCL },
      { X86::ROR64ri, X86::ROR64mi },
      { X86::ROR8r1, X86::ROR8m1 },
      { X86::ROR8rCL, X86::ROR8mCL },
      { X86::ROR8ri, X86::ROR8mi },
      { X86::SAR16r1, X86::SAR16m1 },
      { X86::SAR16rCL, X86::SAR16mCL },
      { X86::SAR16ri, X86::SAR16mi },
      { X86::SAR32r1, X86::SAR32m1 },
      { X86::SAR32rCL, X86::SAR32mCL },
      { X86::SAR32ri, X86::SAR32mi },
      { X86::SAR64r1, X86::SAR64m1 },
      { X86::SAR64rCL, X86::SAR64mCL },
      { X86::SAR64ri, X86::SAR64mi },
      { X86::SAR8r1, X86::SAR8m1 },
      { X86::SAR8rCL, X86::SAR8mCL },
      { X86::SAR8ri, X86::SAR8mi },
      { X86::SBB32ri, X86::SBB32mi },
      { X86::SBB32ri8, X86::SBB32mi8 },
      { X86::SBB32rr, X86::SBB32mr },
      { X86::SBB64ri32, X86::SBB64mi32 },
      { X86::SBB64ri8, X86::SBB64mi8 },
      { X86::SBB64rr, X86::SBB64mr },
      { X86::SHL16r1, X86::SHL16m1 },
      { X86::SHL16rCL, X86::SHL16mCL },
      { X86::SHL16ri, X86::SHL16mi },
      { X86::SHL32r1, X86::SHL32m1 },
      { X86::SHL32rCL, X86::SHL32mCL },
      { X86::SHL32ri, X86::SHL32mi },
      { X86::SHL64r1, X86::SHL64m1 },
      { X86::SHL64rCL, X86::SHL64mCL },
      { X86::SHL64ri, X86::SHL64mi },
      { X86::SHL8r1, X86::SHL8m1 },
      { X86::SHL8rCL, X86::SHL8mCL },
      { X86::SHL8ri, X86::SHL8mi },
      { X86::SHLD16rrCL, X86::SHLD16mrCL },
      { X86::SHLD16rri8, X86::SHLD16mri8 },
      { X86::SHLD32rrCL, X86::SHLD32mrCL },
      { X86::SHLD32rri8, X86::SHLD32mri8 },
      { X86::SHLD64rrCL, X86::SHLD64mrCL },
      { X86::SHLD64rri8, X86::SHLD64mri8 },
      { X86::SHR16r1, X86::SHR16m1 },
      { X86::SHR16rCL, X86::SHR16mCL },
      { X86::SHR16ri, X86::SHR16mi },
      { X86::SHR32r1, X86::SHR32m1 },
      { X86::SHR32rCL, X86::SHR32mCL },
      { X86::SHR32ri, X86::SHR32mi },
      { X86::SHR64r1, X86::SHR64m1 },
      { X86::SHR64rCL, X86::SHR64mCL },
      { X86::SHR64ri, X86::SHR64mi },
      { X86::SHR8r1, X86::SHR8m1 },
      { X86::SHR8rCL, X86::SHR8mCL },
      { X86::SHR8ri, X86::SHR8mi },
      { X86::SHRD16rrCL, X86::SHRD16mrCL },
      { X86::SHRD16rri8, X86::SHRD16mri8 },
      { X86::SHRD32rrCL, X86::SHRD32mrCL },
      { X86::SHRD32rri8, X86::SHRD32mri8 },
      { X86::SHRD64rrCL, X86::SHRD64mrCL },
      { X86::SHRD64rri8, X86::SHRD64mri8 },
      { X86::SUB16ri, X86::SUB16mi },
      { X86::SUB16ri8, X86::SUB16mi8 },
      { X86::SUB16rr, X86::SUB16mr },
      { X86::SUB32ri, X86::SUB32mi },
      { X86::SUB32ri8, X86::SUB32mi8 },
      { X86::SUB32rr, X86::SUB32mr },
      { X86::SUB64ri32, X86::SUB64mi32 },
      { X86::SUB64ri8, X86::SUB64mi8 },
      { X86::SUB64rr, X86::SUB64mr },
      { X86::SUB8ri, X86::SUB8mi },
      { X86::SUB8rr, X86::SUB8mr },
      { X86::XOR16ri, X86::XOR16mi },
      { X86::XOR16ri8, X86::XOR16mi8 },
      { X86::XOR16rr, X86::XOR16mr },
      { X86::XOR32ri, X86::XOR32mi },
      { X86::XOR32ri8, X86::XOR32mi8 },
      { X86::XOR32rr, X86::XOR32mr },
      { X86::XOR64ri32, X86::XOR64mi32 },
      { X86::XOR64ri8, X86::XOR64mi8 },
      { X86::XOR64rr, X86::XOR64mr },
      { X86::XOR8ri, X86::XOR8mi },
      { X86::XOR8rr, X86::XOR8mr }
    };
    ASSERT_SORTED(OpcodeTable);
    OpcodeTablePtr = OpcodeTable;
    OpcodeTableSize = ARRAY_SIZE(OpcodeTable);
    isTwoAddrFold = true;
  } else if (i == 0) { // If operand 0
    if (MI->getOpcode() == X86::MOV16r0)
      NewMI = MakeM0Inst(TII, X86::MOV16mi, FrameIndex, MI);
    else if (MI->getOpcode() == X86::MOV32r0)
      NewMI = MakeM0Inst(TII, X86::MOV32mi, FrameIndex, MI);
    else if (MI->getOpcode() == X86::MOV64r0)
      NewMI = MakeM0Inst(TII, X86::MOV64mi32, FrameIndex, MI);
    else if (MI->getOpcode() == X86::MOV8r0)
      NewMI = MakeM0Inst(TII, X86::MOV8mi, FrameIndex, MI);
    if (NewMI) {
      NewMI->copyKillDeadInfo(MI);
      return NewMI;
    }

    static const TableEntry OpcodeTable[] = {
      { X86::CMP16ri, X86::CMP16mi },
      { X86::CMP16ri8, X86::CMP16mi8 },
      { X86::CMP32ri, X86::CMP32mi },
      { X86::CMP32ri8, X86::CMP32mi8 },
      { X86::CMP8ri, X86::CMP8mi },
      { X86::DIV16r, X86::DIV16m },
      { X86::DIV32r, X86::DIV32m },
      { X86::DIV64r, X86::DIV64m },
      { X86::DIV8r, X86::DIV8m },
      { X86::FsMOVAPDrr, X86::MOVSDmr },
      { X86::FsMOVAPSrr, X86::MOVSSmr },
      { X86::IDIV16r, X86::IDIV16m },
      { X86::IDIV32r, X86::IDIV32m },
      { X86::IDIV64r, X86::IDIV64m },
      { X86::IDIV8r, X86::IDIV8m },
      { X86::IMUL16r, X86::IMUL16m },
      { X86::IMUL32r, X86::IMUL32m },
      { X86::IMUL64r, X86::IMUL64m },
      { X86::IMUL8r, X86::IMUL8m },
      { X86::MOV16ri, X86::MOV16mi },
      { X86::MOV16rr, X86::MOV16mr },
      { X86::MOV32ri, X86::MOV32mi },
      { X86::MOV32rr, X86::MOV32mr },
      { X86::MOV64ri32, X86::MOV64mi32 },
      { X86::MOV64rr, X86::MOV64mr },
      { X86::MOV8ri, X86::MOV8mi },
      { X86::MOV8rr, X86::MOV8mr },
      { X86::MOVAPDrr, X86::MOVAPDmr },
      { X86::MOVAPSrr, X86::MOVAPSmr },
      { X86::MOVPDI2DIrr, X86::MOVPDI2DImr },
      { X86::MOVPQIto64rr, X86::MOVPQIto64mr },
      { X86::MOVPS2SSrr, X86::MOVPS2SSmr },
      { X86::MOVSDrr, X86::MOVSDmr },
      { X86::MOVSDto64rr, X86::MOVSDto64mr },
      { X86::MOVSS2DIrr, X86::MOVSS2DImr },
      { X86::MOVSSrr, X86::MOVSSmr },
      { X86::MOVUPDrr, X86::MOVUPDmr },
      { X86::MOVUPSrr, X86::MOVUPSmr },
      { X86::MUL16r, X86::MUL16m },
      { X86::MUL32r, X86::MUL32m },
      { X86::MUL64r, X86::MUL64m },
      { X86::MUL8r, X86::MUL8m },
      { X86::SETAEr, X86::SETAEm },
      { X86::SETAr, X86::SETAm },
      { X86::SETBEr, X86::SETBEm },
      { X86::SETBr, X86::SETBm },
      { X86::SETEr, X86::SETEm },
      { X86::SETGEr, X86::SETGEm },
      { X86::SETGr, X86::SETGm },
      { X86::SETLEr, X86::SETLEm },
      { X86::SETLr, X86::SETLm },
      { X86::SETNEr, X86::SETNEm },
      { X86::SETNPr, X86::SETNPm },
      { X86::SETNSr, X86::SETNSm },
      { X86::SETPr, X86::SETPm },
      { X86::SETSr, X86::SETSm },
      { X86::TEST16ri, X86::TEST16mi },
      { X86::TEST32ri, X86::TEST32mi },
      { X86::TEST64ri32, X86::TEST64mi32 },
      { X86::TEST8ri, X86::TEST8mi },
      { X86::XCHG16rr, X86::XCHG16mr },
      { X86::XCHG32rr, X86::XCHG32mr },
      { X86::XCHG64rr, X86::XCHG64mr },
      { X86::XCHG8rr, X86::XCHG8mr }
    };
    ASSERT_SORTED(OpcodeTable);
    OpcodeTablePtr = OpcodeTable;
    OpcodeTableSize = ARRAY_SIZE(OpcodeTable);
  } else if (i == 1) {
    static const TableEntry OpcodeTable[] = {
      { X86::CMP16rr, X86::CMP16rm },
      { X86::CMP32rr, X86::CMP32rm },
      { X86::CMP64ri32, X86::CMP64mi32 },
      { X86::CMP64ri8, X86::CMP64mi8 },
      { X86::CMP64rr, X86::CMP64rm },
      { X86::CMP8rr, X86::CMP8rm },
      { X86::CMPPDrri, X86::CMPPDrmi },
      { X86::CMPPSrri, X86::CMPPSrmi },
      { X86::CMPSDrr, X86::CMPSDrm },
      { X86::CMPSSrr, X86::CMPSSrm },
      { X86::CVTSD2SSrr, X86::CVTSD2SSrm },
      { X86::CVTSI2SD64rr, X86::CVTSI2SD64rm },
      { X86::CVTSI2SDrr, X86::CVTSI2SDrm },
      { X86::CVTSI2SS64rr, X86::CVTSI2SS64rm },
      { X86::CVTSI2SSrr, X86::CVTSI2SSrm },
      { X86::CVTSS2SDrr, X86::CVTSS2SDrm },
      { X86::CVTTSD2SI64rr, X86::CVTTSD2SI64rm },
      { X86::CVTTSD2SIrr, X86::CVTTSD2SIrm },
      { X86::CVTTSS2SI64rr, X86::CVTTSS2SI64rm },
      { X86::CVTTSS2SIrr, X86::CVTTSS2SIrm },
      { X86::FsMOVAPDrr, X86::MOVSDrm },
      { X86::FsMOVAPSrr, X86::MOVSSrm },
      { X86::IMUL16rri, X86::IMUL16rmi },
      { X86::IMUL16rri8, X86::IMUL16rmi8 },
      { X86::IMUL32rri, X86::IMUL32rmi },
      { X86::IMUL32rri8, X86::IMUL32rmi8 },
      { X86::IMUL64rr, X86::IMUL64rm },
      { X86::IMUL64rri32, X86::IMUL64rmi32 },
      { X86::IMUL64rri8, X86::IMUL64rmi8 },
      { X86::Int_CMPSDrr, X86::Int_CMPSDrm },
      { X86::Int_CMPSSrr, X86::Int_CMPSSrm },
      { X86::Int_COMISDrr, X86::Int_COMISDrm },
      { X86::Int_COMISSrr, X86::Int_COMISSrm },
      { X86::Int_CVTDQ2PDrr, X86::Int_CVTDQ2PDrm },
      { X86::Int_CVTDQ2PSrr, X86::Int_CVTDQ2PSrm },
      { X86::Int_CVTPD2DQrr, X86::Int_CVTPD2DQrm },
      { X86::Int_CVTPD2PSrr, X86::Int_CVTPD2PSrm },
      { X86::Int_CVTPS2DQrr, X86::Int_CVTPS2DQrm },
      { X86::Int_CVTPS2PDrr, X86::Int_CVTPS2PDrm },
      { X86::Int_CVTSD2SI64rr, X86::Int_CVTSD2SI64rm },
      { X86::Int_CVTSD2SIrr, X86::Int_CVTSD2SIrm },
      { X86::Int_CVTSD2SSrr, X86::Int_CVTSD2SSrm },
      { X86::Int_CVTSI2SD64rr, X86::Int_CVTSI2SD64rm },
      { X86::Int_CVTSI2SDrr, X86::Int_CVTSI2SDrm },
      { X86::Int_CVTSI2SS64rr, X86::Int_CVTSI2SS64rm },
      { X86::Int_CVTSI2SSrr, X86::Int_CVTSI2SSrm },
      { X86::Int_CVTSS2SDrr, X86::Int_CVTSS2SDrm },
      { X86::Int_CVTSS2SI64rr, X86::Int_CVTSS2SI64rm },
      { X86::Int_CVTSS2SIrr, X86::Int_CVTSS2SIrm },
      { X86::Int_CVTTPD2DQrr, X86::Int_CVTTPD2DQrm },
      { X86::Int_CVTTPS2DQrr, X86::Int_CVTTPS2DQrm },
      { X86::Int_CVTTSD2SI64rr, X86::Int_CVTTSD2SI64rm },
      { X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm },
      { X86::Int_CVTTSS2SI64rr, X86::Int_CVTTSS2SI64rm },
      { X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm },
      { X86::Int_UCOMISDrr, X86::Int_UCOMISDrm },
      { X86::Int_UCOMISSrr, X86::Int_UCOMISSrm },
      { X86::MOV16rr, X86::MOV16rm },
      { X86::MOV32rr, X86::MOV32rm },
      { X86::MOV64rr, X86::MOV64rm },
      { X86::MOV64toPQIrr, X86::MOV64toPQIrm },
      { X86::MOV64toSDrr, X86::MOV64toSDrm },
      { X86::MOV8rr, X86::MOV8rm },
      { X86::MOVAPDrr, X86::MOVAPDrm },
      { X86::MOVAPSrr, X86::MOVAPSrm },
      { X86::MOVDDUPrr, X86::MOVDDUPrm },
      { X86::MOVDI2PDIrr, X86::MOVDI2PDIrm },
      { X86::MOVDI2SSrr, X86::MOVDI2SSrm },
      { X86::MOVSD2PDrr, X86::MOVSD2PDrm },
      { X86::MOVSDrr, X86::MOVSDrm },
      { X86::MOVSHDUPrr, X86::MOVSHDUPrm },
      { X86::MOVSLDUPrr, X86::MOVSLDUPrm },
      { X86::MOVSS2PSrr, X86::MOVSS2PSrm },
      { X86::MOVSSrr, X86::MOVSSrm },
      { X86::MOVSX16rr8, X86::MOVSX16rm8 },
      { X86::MOVSX32rr16, X86::MOVSX32rm16 },
      { X86::MOVSX32rr8, X86::MOVSX32rm8 },
      { X86::MOVSX64rr16, X86::MOVSX64rm16 },
      { X86::MOVSX64rr32, X86::MOVSX64rm32 },
      { X86::MOVSX64rr8, X86::MOVSX64rm8 },
      { X86::MOVUPDrr, X86::MOVUPDrm },
      { X86::MOVUPSrr, X86::MOVUPSrm },
      { X86::MOVZX16rr8, X86::MOVZX16rm8 },
      { X86::MOVZX32rr16, X86::MOVZX32rm16 },
      { X86::MOVZX32rr8, X86::MOVZX32rm8 },
      { X86::MOVZX64rr16, X86::MOVZX64rm16 },
      { X86::MOVZX64rr8, X86::MOVZX64rm8 },
      { X86::PSHUFDri, X86::PSHUFDmi },
      { X86::PSHUFHWri, X86::PSHUFHWmi },
      { X86::PSHUFLWri, X86::PSHUFLWmi },
      { X86::PsMOVZX64rr32, X86::PsMOVZX64rm32 },
      { X86::TEST16rr, X86::TEST16rm },
      { X86::TEST32rr, X86::TEST32rm },
      { X86::TEST64rr, X86::TEST64rm },
      { X86::TEST8rr, X86::TEST8rm },
      // FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
      { X86::UCOMISDrr, X86::UCOMISDrm },
      { X86::UCOMISSrr, X86::UCOMISSrm },
      { X86::XCHG16rr, X86::XCHG16rm },
      { X86::XCHG32rr, X86::XCHG32rm },
      { X86::XCHG64rr, X86::XCHG64rm },
      { X86::XCHG8rr, X86::XCHG8rm }
    };
    ASSERT_SORTED(OpcodeTable);
    OpcodeTablePtr = OpcodeTable;
    OpcodeTableSize = ARRAY_SIZE(OpcodeTable);
  } else if (i == 2) {
    static const TableEntry OpcodeTable[] = {
      { X86::ADC32rr, X86::ADC32rm },
      { X86::ADC64rr, X86::ADC64rm },
      { X86::ADD16rr, X86::ADD16rm },
      { X86::ADD32rr, X86::ADD32rm },
      { X86::ADD64rr, X86::ADD64rm },
      { X86::ADD8rr, X86::ADD8rm },
      { X86::ADDPDrr, X86::ADDPDrm },
      { X86::ADDPSrr, X86::ADDPSrm },
      { X86::ADDSDrr, X86::ADDSDrm },
      { X86::ADDSSrr, X86::ADDSSrm },
      { X86::ADDSUBPDrr, X86::ADDSUBPDrm },
      { X86::ADDSUBPSrr, X86::ADDSUBPSrm },
      { X86::AND16rr, X86::AND16rm },
      { X86::AND32rr, X86::AND32rm },
      { X86::AND64rr, X86::AND64rm },
      { X86::AND8rr, X86::AND8rm },
      { X86::ANDNPDrr, X86::ANDNPDrm },
      { X86::ANDNPSrr, X86::ANDNPSrm },
      { X86::ANDPDrr, X86::ANDPDrm },
      { X86::ANDPSrr, X86::ANDPSrm },
      { X86::CMOVA16rr, X86::CMOVA16rm },
      { X86::CMOVA32rr, X86::CMOVA32rm },
      { X86::CMOVA64rr, X86::CMOVA64rm },
      { X86::CMOVAE16rr, X86::CMOVAE16rm },
      { X86::CMOVAE32rr, X86::CMOVAE32rm },
      { X86::CMOVAE64rr, X86::CMOVAE64rm },
      { X86::CMOVB16rr, X86::CMOVB16rm },
      { X86::CMOVB32rr, X86::CMOVB32rm },
      { X86::CMOVB64rr, X86::CMOVB64rm },
      { X86::CMOVBE16rr, X86::CMOVBE16rm },
      { X86::CMOVBE32rr, X86::CMOVBE32rm },
      { X86::CMOVBE64rr, X86::CMOVBE64rm },
      { X86::CMOVE16rr, X86::CMOVE16rm },
      { X86::CMOVE32rr, X86::CMOVE32rm },
      { X86::CMOVE64rr, X86::CMOVE64rm },
      { X86::CMOVG16rr, X86::CMOVG16rm },
      { X86::CMOVG32rr, X86::CMOVG32rm },
      { X86::CMOVG64rr, X86::CMOVG64rm },
      { X86::CMOVGE16rr, X86::CMOVGE16rm },
      { X86::CMOVGE32rr, X86::CMOVGE32rm },
      { X86::CMOVGE64rr, X86::CMOVGE64rm },
      { X86::CMOVL16rr, X86::CMOVL16rm },
      { X86::CMOVL32rr, X86::CMOVL32rm },
      { X86::CMOVL64rr, X86::CMOVL64rm },
      { X86::CMOVLE16rr, X86::CMOVLE16rm },
      { X86::CMOVLE32rr, X86::CMOVLE32rm },
      { X86::CMOVLE64rr, X86::CMOVLE64rm },
      { X86::CMOVNE16rr, X86::CMOVNE16rm },
      { X86::CMOVNE32rr, X86::CMOVNE32rm },
      { X86::CMOVNE64rr, X86::CMOVNE64rm },
      { X86::CMOVNP16rr, X86::CMOVNP16rm },
      { X86::CMOVNP32rr, X86::CMOVNP32rm },
      { X86::CMOVNP64rr, X86::CMOVNP64rm },
      { X86::CMOVNS16rr, X86::CMOVNS16rm },
      { X86::CMOVNS32rr, X86::CMOVNS32rm },
      { X86::CMOVNS64rr, X86::CMOVNS64rm },
      { X86::CMOVP16rr, X86::CMOVP16rm },
      { X86::CMOVP32rr, X86::CMOVP32rm },
      { X86::CMOVP64rr, X86::CMOVP64rm },
      { X86::CMOVS16rr, X86::CMOVS16rm },
      { X86::CMOVS32rr, X86::CMOVS32rm },
      { X86::CMOVS64rr, X86::CMOVS64rm },
      { X86::DIVPDrr, X86::DIVPDrm },
      { X86::DIVPSrr, X86::DIVPSrm },
      { X86::DIVSDrr, X86::DIVSDrm },
      { X86::DIVSSrr, X86::DIVSSrm },
      { X86::HADDPDrr, X86::HADDPDrm },
      { X86::HADDPSrr, X86::HADDPSrm },
      { X86::HSUBPDrr, X86::HSUBPDrm },
      { X86::HSUBPSrr, X86::HSUBPSrm },
      { X86::IMUL16rr, X86::IMUL16rm },
      { X86::IMUL32rr, X86::IMUL32rm },
      { X86::MAXPDrr, X86::MAXPDrm },
      { X86::MAXPDrr_Int, X86::MAXPDrm_Int },
      { X86::MAXPSrr, X86::MAXPSrm },
      { X86::MAXPSrr_Int, X86::MAXPSrm_Int },
      { X86::MAXSDrr, X86::MAXSDrm },
      { X86::MAXSDrr_Int, X86::MAXSDrm_Int },
      { X86::MAXSSrr, X86::MAXSSrm },
      { X86::MAXSSrr_Int, X86::MAXSSrm_Int },
      { X86::MINPDrr, X86::MINPDrm },
      { X86::MINPDrr_Int, X86::MINPDrm_Int },
      { X86::MINPSrr, X86::MINPSrm },
      { X86::MINPSrr_Int, X86::MINPSrm_Int },
      { X86::MINSDrr, X86::MINSDrm },
      { X86::MINSDrr_Int, X86::MINSDrm_Int },
      { X86::MINSSrr, X86::MINSSrm },
      { X86::MINSSrr_Int, X86::MINSSrm_Int },
      { X86::MULPDrr, X86::MULPDrm },
      { X86::MULPSrr, X86::MULPSrm },
      { X86::MULSDrr, X86::MULSDrm },
      { X86::MULSSrr, X86::MULSSrm },
      { X86::OR16rr, X86::OR16rm },
      { X86::OR32rr, X86::OR32rm },
      { X86::OR64rr, X86::OR64rm },
      { X86::OR8rr, X86::OR8rm },
      { X86::ORPDrr, X86::ORPDrm },
      { X86::ORPSrr, X86::ORPSrm },
      { X86::PACKSSDWrr, X86::PACKSSDWrm },
      { X86::PACKSSWBrr, X86::PACKSSWBrm },
      { X86::PACKUSWBrr, X86::PACKUSWBrm },
      { X86::PADDBrr, X86::PADDBrm },
      { X86::PADDDrr, X86::PADDDrm },
      { X86::PADDQrr, X86::PADDQrm },
      { X86::PADDSBrr, X86::PADDSBrm },
      { X86::PADDSWrr, X86::PADDSWrm },
      { X86::PADDWrr, X86::PADDWrm },
      { X86::PANDNrr, X86::PANDNrm },
      { X86::PANDrr, X86::PANDrm },
      { X86::PAVGBrr, X86::PAVGBrm },
      { X86::PAVGWrr, X86::PAVGWrm },
      { X86::PCMPEQBrr, X86::PCMPEQBrm },
      { X86::PCMPEQDrr, X86::PCMPEQDrm },
      { X86::PCMPEQWrr, X86::PCMPEQWrm },
      { X86::PCMPGTBrr, X86::PCMPGTBrm },
      { X86::PCMPGTDrr, X86::PCMPGTDrm },
      { X86::PCMPGTWrr, X86::PCMPGTWrm },
      { X86::PINSRWrri, X86::PINSRWrmi },
      { X86::PMADDWDrr, X86::PMADDWDrm },
      { X86::PMAXSWrr, X86::PMAXSWrm },
      { X86::PMAXUBrr, X86::PMAXUBrm },
      { X86::PMINSWrr, X86::PMINSWrm },
      { X86::PMINUBrr, X86::PMINUBrm },
      { X86::PMULHUWrr, X86::PMULHUWrm },
      { X86::PMULHWrr, X86::PMULHWrm },
      { X86::PMULLWrr, X86::PMULLWrm },
      { X86::PMULUDQrr, X86::PMULUDQrm },
      { X86::PORrr, X86::PORrm },
      { X86::PSADBWrr, X86::PSADBWrm },
      { X86::PSLLDrr, X86::PSLLDrm },
      { X86::PSLLQrr, X86::PSLLQrm },
      { X86::PSLLWrr, X86::PSLLWrm },
      { X86::PSRADrr, X86::PSRADrm },
      { X86::PSRAWrr, X86::PSRAWrm },
      { X86::PSRLDrr, X86::PSRLDrm },
      { X86::PSRLQrr, X86::PSRLQrm },
      { X86::PSRLWrr, X86::PSRLWrm },
      { X86::PSUBBrr, X86::PSUBBrm },
      { X86::PSUBDrr, X86::PSUBDrm },
      { X86::PSUBSBrr, X86::PSUBSBrm },
      { X86::PSUBSWrr, X86::PSUBSWrm },
      { X86::PSUBWrr, X86::PSUBWrm },
      { X86::PUNPCKHBWrr, X86::PUNPCKHBWrm },
      { X86::PUNPCKHDQrr, X86::PUNPCKHDQrm },
      { X86::PUNPCKHQDQrr, X86::PUNPCKHQDQrm },
      { X86::PUNPCKHWDrr, X86::PUNPCKHWDrm },
      { X86::PUNPCKLBWrr, X86::PUNPCKLBWrm },
      { X86::PUNPCKLDQrr, X86::PUNPCKLDQrm },
      { X86::PUNPCKLQDQrr, X86::PUNPCKLQDQrm },
      { X86::PUNPCKLWDrr, X86::PUNPCKLWDrm },
      { X86::PXORrr, X86::PXORrm },
      { X86::RCPPSr, X86::RCPPSm },
      { X86::RCPPSr_Int, X86::RCPPSm_Int },
      { X86::RSQRTPSr, X86::RSQRTPSm },
      { X86::RSQRTPSr_Int, X86::RSQRTPSm_Int },
      { X86::RSQRTSSr, X86::RSQRTSSm },
      { X86::RSQRTSSr_Int, X86::RSQRTSSm_Int },
      { X86::SBB32rr, X86::SBB32rm },
      { X86::SBB64rr, X86::SBB64rm },
      { X86::SHUFPDrri, X86::SHUFPDrmi },
      { X86::SHUFPSrri, X86::SHUFPSrmi },
      { X86::SQRTPDr, X86::SQRTPDm },
      { X86::SQRTPDr_Int, X86::SQRTPDm_Int },
      { X86::SQRTPSr, X86::SQRTPSm },
      { X86::SQRTPSr_Int, X86::SQRTPSm_Int },
      { X86::SQRTSDr, X86::SQRTSDm },
      { X86::SQRTSDr_Int, X86::SQRTSDm_Int },
      { X86::SQRTSSr, X86::SQRTSSm },
      { X86::SQRTSSr_Int, X86::SQRTSSm_Int },
      { X86::SUB16rr, X86::SUB16rm },
      { X86::SUB32rr, X86::SUB32rm },
      { X86::SUB64rr, X86::SUB64rm },
      { X86::SUB8rr, X86::SUB8rm },
      { X86::SUBPDrr, X86::SUBPDrm },
      { X86::SUBPSrr, X86::SUBPSrm },
      { X86::SUBSDrr, X86::SUBSDrm },
      { X86::SUBSSrr, X86::SUBSSrm },
      // FIXME: TEST*rr -> swapped operand of TEST*mr.
      { X86::UNPCKHPDrr, X86::UNPCKHPDrm },
      { X86::UNPCKHPSrr, X86::UNPCKHPSrm },
      { X86::UNPCKLPDrr, X86::UNPCKLPDrm },
      { X86::UNPCKLPSrr, X86::UNPCKLPSrm },
      { X86::XOR16rr, X86::XOR16rm },
      { X86::XOR32rr, X86::XOR32rm },
      { X86::XOR64rr, X86::XOR64rm },
      { X86::XOR8rr, X86::XOR8rm },
      { X86::XORPDrr, X86::XORPDrm },
      { X86::XORPSrr, X86::XORPSrm }
    };
    ASSERT_SORTED(OpcodeTable);
    OpcodeTablePtr = OpcodeTable;
    OpcodeTableSize = ARRAY_SIZE(OpcodeTable);
  }
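
  // (The operand index selects which register the memory reference replaces:
  // index 0 maps to opcodes whose first operand is memory (the mr/mi forms),
  // index 1 to the rm load forms, and index 2 folds the second source of a
  // three-operand instruction.)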
  // If table selected...
  if (OpcodeTablePtr) {
    // Find the Opcode to fuse
    unsigned fromOpcode = MI->getOpcode();
    // Lookup fromOpcode in table
    if (const TableEntry *Entry = TableLookup(OpcodeTablePtr, OpcodeTableSize,
                                              fromOpcode)) {
      if (isTwoAddrFold)
        NewMI = FuseTwoAddrInst(Entry->to, FrameIndex, MI, TII);
      else
        NewMI = FuseInst(Entry->to, i, FrameIndex, MI, TII);
      NewMI->copyKillDeadInfo(MI);
      return NewMI;
    }
  }

  // No fusion
  if (PrintFailedFusing)
    cerr << "We failed to fuse ("
         << ((i == 1) ? "r" : "s") << "): " << *MI;
  return NULL;
}


const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  if (Is64Bit)
    return CalleeSavedRegs64Bit;
  else {
    if (MF) {
      MachineFrameInfo *MFI = MF->getFrameInfo();
      MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
      if (MMI && MMI->callsEHReturn())
        return CalleeSavedRegs32EHRet;
    }
    return CalleeSavedRegs32Bit;
  }
}

const TargetRegisterClass* const*
X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
  static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };

  if (Is64Bit)
    return CalleeSavedRegClasses64Bit;
  else {
    if (MF) {
      MachineFrameInfo *MFI = MF->getFrameInfo();
      MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
      if (MMI && MMI->callsEHReturn())
        return CalleeSavedRegClasses32EHRet;
    }
    return CalleeSavedRegClasses32Bit;
  }

}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }
  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

// hasFP - Return true if the specified function should have a dedicated frame
// pointer register. This is true if the function has variable sized allocas
// or if frame pointer elimination is disabled.
//
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();

  return (NoFramePointerElim ||
          MFI->hasVarSizedObjects() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          (MMI && MMI->callsUnwindInit()));
}

bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into an 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;

      MachineInstr *New = 0;
      if (Old->getOpcode() == X86::ADJCALLSTACKDOWN) {
        New=BuildMI(TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      } else {
        assert(Old->getOpcode() == X86::ADJCALLSTACKUP);
        // factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;
        if (Amount) {
          unsigned Opc = (Amount < 128) ?
            (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
            (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
          New = BuildMI(TII.get(Opc), StackPtr)
            .addReg(StackPtr).addImm(Amount);
        }
      }

      // Replace the pseudo instruction with a new instruction...
      if (New) MBB.insert(I, New);
    }
  } else if (I->getOpcode() == X86::ADJCALLSTACKUP) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = (CalleeAmt < 128) ?
        (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
        (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
      MachineInstr *New =
        BuildMI(TII.get(Opc), StackPtr).addReg(StackPtr).addImm(CalleeAmt);
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}
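
// For example, on x86-32 a call needing 20 bytes of outgoing arguments sees
// its ADJCALLSTACKDOWN 20 replaced by 'sub ESP, <20 rounded up to the stack
// alignment>', and the matching ADJCALLSTACKUP becomes the corresponding
// 'add ESP, <amt>' (minus whatever the callee itself popped).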

void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                          int SPAdj, RegScavenger *RS) const{
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  while (!MI.getOperand(i).isFrameIndex()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getFrameIndex();
  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register (EBP or ESP), and fold the frame
  // object offset into the displacement.
  MI.getOperand(i).ChangeToRegister(hasFP(MF) ? FramePtr : StackPtr, false);

  // Now add the frame object offset to the offset from EBP.
  int64_t Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
                   MI.getOperand(i+3).getImm()+SlotSize;

  if (!hasFP(MF))
    Offset += MF.getFrameInfo()->getStackSize();
  else
    Offset += SlotSize;  // Skip the saved EBP

  MI.getOperand(i+3).ChangeToImmediate(Offset);
}

void
X86RegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF) const{
  if (hasFP(MF)) {
    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize,
                                                        (int)SlotSize * -2);
    assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
  }
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub
    ? ((Offset < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
    : ((Offset < 128) ?
       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
  uint64_t Chunk = (1LL << 31) - 1;

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    BuildMI(MBB, MBBI, TII.get(Opc), StackPtr).addReg(StackPtr).addImm(ThisVal);
    Offset -= ThisVal;
  }
}
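
// For example, emitSPUpdate(..., -8, ...) emits 'sub ESP, 8' using the ri8
// form (chosen whenever the magnitude is below 128), while larger adjustments
// use the ri32 form and are split into chunks of at most 2^31-1 bytes.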

void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();   // Prolog goes in entry BB
  MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
  const Function* Fn = MF.getFunction();
  const X86Subtarget* Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();

  // Prepare for frame info.
  unsigned FrameLabelId = 0, StartLabelId = 0;

  // Get the number of bytes to allocate from the FrameInfo
  uint64_t StackSize = MFI->getStackSize();
  uint64_t NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();

  if (MMI && MMI->needsFrameInfo()) {
    // Mark function start
    StartLabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, TII.get(X86::LABEL)).addImm(StartLabelId);
  }

  if (hasFP(MF)) {
    // Get the offset of the stack slot for the EBP register... which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(SlotSize-NumBytes);

    // Save EBP into the appropriate stack slot...
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr);
    NumBytes -= SlotSize;

    if (MMI && MMI->needsFrameInfo()) {
      // Mark effective beginning of when frame pointer becomes valid.
      FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, TII.get(X86::LABEL)).addImm(FrameLabelId);
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);
  }

  unsigned ReadyLabelId = 0;
  if (MMI && MMI->needsFrameInfo()) {
    // Mark effective beginning of when frame pointer is ready.
    ReadyLabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, TII.get(X86::LABEL)).addImm(ReadyLabelId);
  }

  // Skip the callee-saved push instructions.
  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r))
    ++MBBI;

  if (NumBytes) {   // adjust stack pointer: ESP -= numbytes
    if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
      // Check whether EAX is live-in for this function
      bool isEAXAlive = false;
      for (MachineFunction::livein_iterator II = MF.livein_begin(),
           EE = MF.livein_end(); (II != EE) && !isEAXAlive; ++II) {
        unsigned Reg = II->first;
        isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                      Reg == X86::AH || Reg == X86::AL);
      }

      // Function prologue calls _alloca to probe the stack when allocating
      // more than 4k bytes in one go. Touching the stack at 4K increments is
      // necessary to ensure that the guard pages used by the OS virtual memory
      // manager are allocated in correct sequence.
      if (!isEAXAlive) {
        BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
        BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
          .addExternalSymbol("_alloca");
      } else {
        // Save EAX
        BuildMI(MBB, MBBI, TII.get(X86::PUSH32r), X86::EAX);
        // Allocate NumBytes-4 bytes on stack. We'll also use 4 already
        // allocated bytes for EAX.
        BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4);
        BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
          .addExternalSymbol("_alloca");
        // Restore EAX
        MachineInstr *MI = addRegOffset(BuildMI(TII.get(X86::MOV32rm),X86::EAX),
                                        StackPtr, NumBytes-4);
        MBB.insert(MBBI, MI);
      }
    } else {
      // If there is an ADD32ri or SUB32ri of ESP immediately after this
      // instruction, merge the two instructions.
      if (MBBI != MBB.end()) {
        MachineBasicBlock::iterator NI = next(MBBI);
        unsigned Opc = MBBI->getOpcode();
        if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
             Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
            MBBI->getOperand(0).getReg() == StackPtr) {
          NumBytes -= MBBI->getOperand(2).getImm();
          MBB.erase(MBBI);
          MBBI = NI;
        } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
                    Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
                   MBBI->getOperand(0).getReg() == StackPtr) {
          NumBytes += MBBI->getOperand(2).getImm();
          MBB.erase(MBBI);
          MBBI = NI;
        }
      }

      if (NumBytes)
        emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
    }
  }

  if (MMI && MMI->needsFrameInfo()) {
    std::vector<MachineMove> &Moves = MMI->getFrameMoves();
    const TargetAsmInfo *TAI = MF.getTarget().getTargetAsmInfo();

    // Calculate the number of bytes used to store the return address
    int stackGrowth =
      (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
       TargetFrameInfo::StackGrowsUp ?
       TAI->getAddressSize() : -TAI->getAddressSize());

    if (StackSize) {
      // Show update of SP.
      if (hasFP(MF)) {
        // Adjust SP
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2*stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      } else {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -StackSize+stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      }
    } else {
      //FIXME: Verify & implement for FP
      MachineLocation SPDst(StackPtr);
      MachineLocation SPSrc(StackPtr, stackGrowth);
      Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
    }

    // Add callee saved registers to move list.
    const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();

    // FIXME: This is a dirty hack. The code itself is pretty messy right now.
    // It should be rewritten from scratch and generalized sometime.

    // Determine maximum offset (minimum due to stack growth)
    int64_t MaxOffset = 0;
    for (unsigned I = 0, E = CSI.size(); I!=E; ++I)
      MaxOffset = std::min(MaxOffset,
                           MFI->getObjectOffset(CSI[I].getFrameIdx()));

    // Calculate offsets
    for (unsigned I = 0, E = CSI.size(); I!=E; ++I) {
      int64_t Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
      unsigned Reg = CSI[I].getReg();
      Offset = (MaxOffset-Offset+3*stackGrowth);
      MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
      MachineLocation CSSrc(Reg);
      Moves.push_back(MachineMove(FrameLabelId, CSDst, CSSrc));
    }

    if (hasFP(MF)) {
      // Save FP
      MachineLocation FPDst(MachineLocation::VirtualFP, 2*stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(ReadyLabelId, FPDst, FPSrc));
    }

    MachineLocation FPDst(hasFP(MF) ? FramePtr : StackPtr);
    MachineLocation FPSrc(MachineLocation::VirtualFP);
    Moves.push_back(MachineMove(ReadyLabelId, FPDst, FPSrc));
  }

  // If it's main() on Cygwin/Mingw32 we should align the stack as well
  if (Fn->hasExternalLinkage() && Fn->getName() == "main" &&
      Subtarget->isTargetCygMing()) {
    BuildMI(MBB, MBBI, TII.get(X86::AND32ri), X86::ESP)
      .addReg(X86::ESP).addImm(-Align);

    // Probe the stack
    BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(Align);
    BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
  }
}

void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();

  switch (RetOpcode) {
  case X86::RET:
  case X86::RETI:
  case X86::EH_RETURN:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm: break;  // These are ok
  default:
    assert(0 && "Can only insert epilog into returning blocks");
  }

  // Get the number of bytes to allocate from the FrameInfo
  uint64_t StackSize = MFI->getStackSize();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = StackSize - CSSize;

  if (hasFP(MF)) {
    // pop EBP.
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
    NumBytes -= SlotSize;
  }

  // Skip the callee-saved pop instructions.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();
    if (Opc != X86::POP32r && Opc != X86::POP64r && !TII.isTerminatorInstr(Opc))
      break;
    --MBBI;
  }

  if (NumBytes || MFI->hasVarSizedObjects()) {
    // If there is an ADD32ri or SUB32ri of ESP immediately before this
    // instruction, merge the two instructions.
    if (MBBI != MBB.begin()) {
      MachineBasicBlock::iterator PI = prior(MBBI);
      unsigned Opc = PI->getOpcode();
      if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
           Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
          PI->getOperand(0).getReg() == StackPtr) {
        NumBytes += PI->getOperand(2).getImm();
        MBB.erase(PI);
      } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
                  Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
                 PI->getOperand(0).getReg() == StackPtr) {
        NumBytes -= PI->getOperand(2).getImm();
        MBB.erase(PI);
      }
    }
  }

  // If dynamic alloca is used, then reset esp to point to the last
  // callee-saved slot before popping them off!
  if (MFI->hasVarSizedObjects()) {
    unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
    if (CSSize) {
      MachineInstr *MI = addRegOffset(BuildMI(TII.get(Opc), StackPtr),
                                      FramePtr, -CSSize);
      MBB.insert(MBBI, MI);
    } else
      BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
              StackPtr).addReg(FramePtr);

    NumBytes = 0;
  }

  // adjust stack pointer back: ESP += numbytes
  if (NumBytes)
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);

  // We're returning from the function via eh_return.
  if (RetOpcode == X86::EH_RETURN) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  if (Is64Bit)
    return X86::RIP;  // Should have dwarf #16
  else
    return X86::EIP;  // Should have dwarf #8
}

unsigned X86RegisterInfo::getFrameRegister(MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

void X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves)
                                                                         const {
  // Calculate the number of bytes used to store the return address
  int stackGrowth = (Is64Bit ? -8 : -4);

  // Initial state of the frame pointer is esp+4.
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add return address to move list
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  assert(0 && "What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  assert(0 && "What is the exception handler register");
  return 0;
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::ValueType VT, bool High) {
  switch (VT) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"